id
stringlengths
25
30
content
stringlengths
14
942k
max_stars_repo_path
stringlengths
49
55
crossvul-cpp_data_bad_2484_0
/* * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Modified by Cort Dougan (cort@cs.nmt.edu) * and Paul Mackerras (paulus@samba.org) */ /* * This file handles the architecture-dependent parts of hardware exceptions */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/a.out.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/module.h> #include <linux/prctl.h> #include <linux/delay.h> #include <linux/kprobes.h> #include <linux/kexec.h> #include <linux/backlight.h> #include <asm/kdebug.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/pmc.h> #ifdef CONFIG_PPC32 #include <asm/reg.h> #endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #ifdef CONFIG_PPC64 #include <asm/firmware.h> #include <asm/processor.h> #endif #include <asm/kexec.h> #ifdef CONFIG_PPC64 /* XXX */ #define _IO_BASE pci_io_base #endif #ifdef CONFIG_DEBUGGER int (*__debugger)(struct pt_regs *regs); int (*__debugger_ipi)(struct pt_regs *regs); int (*__debugger_bpt)(struct pt_regs *regs); int (*__debugger_sstep)(struct pt_regs *regs); int (*__debugger_iabr_match)(struct pt_regs *regs); int (*__debugger_dabr_match)(struct pt_regs *regs); int (*__debugger_fault_handler)(struct pt_regs *regs); EXPORT_SYMBOL(__debugger); EXPORT_SYMBOL(__debugger_ipi); EXPORT_SYMBOL(__debugger_bpt); EXPORT_SYMBOL(__debugger_sstep); EXPORT_SYMBOL(__debugger_iabr_match); EXPORT_SYMBOL(__debugger_dabr_match); 
EXPORT_SYMBOL(__debugger_fault_handler); #endif ATOMIC_NOTIFIER_HEAD(powerpc_die_chain); int register_die_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&powerpc_die_chain, nb); } EXPORT_SYMBOL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&powerpc_die_chain, nb); } EXPORT_SYMBOL(unregister_die_notifier); /* * Trap & Exception support */ static DEFINE_SPINLOCK(die_lock); int die(const char *str, struct pt_regs *regs, long err) { static int die_counter; if (debugger(regs)) return 1; console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); #ifdef CONFIG_PMAC_BACKLIGHT mutex_lock(&pmac_backlight_mutex); if (machine_is(powermac) && pmac_backlight) { struct backlight_properties *props; down(&pmac_backlight->sem); props = pmac_backlight->props; props->brightness = props->max_brightness; props->power = FB_BLANK_UNBLANK; props->update_status(pmac_backlight); up(&pmac_backlight->sem); } mutex_unlock(&pmac_backlight_mutex); #endif printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif #ifdef CONFIG_SMP printk("SMP NR_CPUS=%d ", NR_CPUS); #endif #ifdef CONFIG_DEBUG_PAGEALLOC printk("DEBUG_PAGEALLOC "); #endif #ifdef CONFIG_NUMA printk("NUMA "); #endif printk("%s\n", ppc_md.name ? 
"" : ppc_md.name); print_modules(); show_regs(regs); bust_spinlocks(0); spin_unlock_irq(&die_lock); if (kexec_should_crash(current) || kexec_sr_activated(smp_processor_id())) crash_kexec(regs); crash_kexec_secondary(regs); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); do_exit(err); return 0; } void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { siginfo_t info; if (!user_mode(regs)) { if (die("Exception in kernel mode", regs, signr)) return; } memset(&info, 0, sizeof(info)); info.si_signo = signr; info.si_code = code; info.si_addr = (void __user *) addr; force_sig_info(signr, &info, current); /* * Init gets no signals that it doesn't have a handler for. * That's all very well, but if it has caused a synchronous * exception and we ignore the resulting signal, it will just * generate the same exception over and over again and we get * nowhere. Better to kill it and let the kernel panic. */ if (current->pid == 1) { __sighandler_t handler; spin_lock_irq(&current->sighand->siglock); handler = current->sighand->action[signr-1].sa.sa_handler; spin_unlock_irq(&current->sighand->siglock); if (handler == SIG_DFL) { /* init has generated a synchronous exception and it doesn't have a handler for the signal */ printk(KERN_CRIT "init has generated signal %d " "but has no handler for it\n", signr); do_exit(signr); } } } #ifdef CONFIG_PPC64 void system_reset_exception(struct pt_regs *regs) { /* See if any machine dependent calls */ if (ppc_md.system_reset_exception) { if (ppc_md.system_reset_exception(regs)) return; } #ifdef CONFIG_KEXEC cpu_set(smp_processor_id(), cpus_in_sr); #endif die("System Reset", regs, SIGABRT); /* * Some CPUs when released from the debugger will execute this path. * These CPUs entered the debugger via a soft-reset. If the CPU was * hung before entering the debugger it will return to the hung * state when exiting this function. 
This causes a problem in * kdump since the hung CPU(s) will not respond to the IPI sent * from kdump. To prevent the problem we call crash_kexec_secondary() * here. If a kdump had not been initiated or we exit the debugger * with the "exit and recover" command (x) crash_kexec_secondary() * will return after 5ms and the CPU returns to its previous state. */ crash_kexec_secondary(regs); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable System Reset"); /* What should we do here? We could issue a shutdown or hard reset. */ } #endif /* * I/O accesses can cause machine checks on powermacs. * Check if the NIP corresponds to the address of a sync * instruction for which there is an entry in the exception * table. * Note that the 601 only takes a machine check on TEA * (transfer error ack) signal assertion, and does not * set any of the top 16 bits of SRR1. * -- paulus. */ static inline int check_io_access(struct pt_regs *regs) { #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) unsigned long msr = regs->msr; const struct exception_table_entry *entry; unsigned int *nip = (unsigned int *)regs->nip; if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) && (entry = search_exception_tables(regs->nip)) != NULL) { /* * Check that it's a sync instruction, or somewhere * in the twi; isync; nop sequence that inb/inw/inl uses. * As the address is in the exception table * we should be able to read the instr there. * For the debug message, we look at the preceding * load or store. */ if (*nip == 0x60000000) /* nop */ nip -= 2; else if (*nip == 0x4c00012c) /* isync */ --nip; if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { /* sync or twi */ unsigned int rb; --nip; rb = (*nip >> 11) & 0x1f; printk(KERN_DEBUG "%s bad port %lx at %p\n", (*nip & 0x100)? 
"OUT to": "IN from", regs->gpr[rb] - _IO_BASE, nip); regs->msr |= MSR_RI; regs->nip = entry->fixup; return 1; } } #endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */ return 0; } #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) /* On 4xx, the reason for the machine check or program exception is in the ESR. */ #define get_reason(regs) ((regs)->dsisr) #ifndef CONFIG_FSL_BOOKE #define get_mc_reason(regs) ((regs)->dsisr) #else #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) #endif #define REASON_FP ESR_FP #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) #define REASON_PRIVILEGED ESR_PPR #define REASON_TRAP ESR_PTR /* single-step stuff */ #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) #else /* On non-4xx, the reason for the machine check or program exception is in the MSR. */ #define get_reason(regs) ((regs)->msr) #define get_mc_reason(regs) ((regs)->msr) #define REASON_FP 0x100000 #define REASON_ILLEGAL 0x80000 #define REASON_PRIVILEGED 0x40000 #define REASON_TRAP 0x20000 #define single_stepping(regs) ((regs)->msr & MSR_SE) #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) #endif /* * This is "fall-back" implementation for configurations * which don't provide platform-specific machine check info */ void __attribute__ ((weak)) platform_machine_check(struct pt_regs *regs) { } void machine_check_exception(struct pt_regs *regs) { int recover = 0; unsigned long reason = get_mc_reason(regs); /* See if any machine dependent calls */ if (ppc_md.machine_check_exception) recover = ppc_md.machine_check_exception(regs); if (recover) return; if (user_mode(regs)) { regs->msr |= MSR_RI; _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); return; } #if defined(CONFIG_8xx) && defined(CONFIG_PCI) /* the qspan pci read routines can cause machine checks -- Cort */ bad_page_fault(regs, regs->dar, SIGBUS); return; #endif if (debugger_fault_handler(regs)) { regs->msr |= MSR_RI; return; } if (check_io_access(regs)) 
return; #if defined(CONFIG_4xx) && !defined(CONFIG_440A) if (reason & ESR_IMCP) { printk("Instruction"); mtspr(SPRN_ESR, reason & ~ESR_IMCP); } else printk("Data"); printk(" machine check in kernel mode.\n"); #elif defined(CONFIG_440A) printk("Machine check in kernel mode.\n"); if (reason & ESR_IMCP){ printk("Instruction Synchronous Machine Check exception\n"); mtspr(SPRN_ESR, reason & ~ESR_IMCP); } else { u32 mcsr = mfspr(SPRN_MCSR); if (mcsr & MCSR_IB) printk("Instruction Read PLB Error\n"); if (mcsr & MCSR_DRB) printk("Data Read PLB Error\n"); if (mcsr & MCSR_DWB) printk("Data Write PLB Error\n"); if (mcsr & MCSR_TLBP) printk("TLB Parity Error\n"); if (mcsr & MCSR_ICP){ flush_instruction_cache(); printk("I-Cache Parity Error\n"); } if (mcsr & MCSR_DCSP) printk("D-Cache Search Parity Error\n"); if (mcsr & MCSR_DCFP) printk("D-Cache Flush Parity Error\n"); if (mcsr & MCSR_IMPE) printk("Machine Check exception is imprecise\n"); /* Clear MCSR */ mtspr(SPRN_MCSR, mcsr); } #elif defined (CONFIG_E500) printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) printk("Machine Check Signal\n"); if (reason & MCSR_ICPERR) printk("Instruction Cache Parity Error\n"); if (reason & MCSR_DCP_PERR) printk("Data Cache Push Parity Error\n"); if (reason & MCSR_DCPERR) printk("Data Cache Parity Error\n"); if (reason & MCSR_GL_CI) printk("Guarded Load or Cache-Inhibited stwcx.\n"); if (reason & MCSR_BUS_IAERR) printk("Bus - Instruction Address Error\n"); if (reason & MCSR_BUS_RAERR) printk("Bus - Read Address Error\n"); if (reason & MCSR_BUS_WAERR) printk("Bus - Write Address Error\n"); if (reason & MCSR_BUS_IBERR) printk("Bus - Instruction Data Error\n"); if (reason & MCSR_BUS_RBERR) printk("Bus - Read Data Bus Error\n"); if (reason & MCSR_BUS_WBERR) printk("Bus - Read Data Bus Error\n"); if (reason & MCSR_BUS_IPERR) printk("Bus - Instruction Parity Error\n"); if (reason & MCSR_BUS_RPERR) printk("Bus - Read Parity Error\n"); 
#elif defined (CONFIG_E200) printk("Machine check in kernel mode.\n"); printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) printk("Machine Check Signal\n"); if (reason & MCSR_CP_PERR) printk("Cache Push Parity Error\n"); if (reason & MCSR_CPERR) printk("Cache Parity Error\n"); if (reason & MCSR_EXCP_ERR) printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); if (reason & MCSR_BUS_IRERR) printk("Bus - Read Bus Error on instruction fetch\n"); if (reason & MCSR_BUS_DRERR) printk("Bus - Read Bus Error on data load\n"); if (reason & MCSR_BUS_WRERR) printk("Bus - Write Bus Error on buffered store or cache line push\n"); #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ printk("Machine check in kernel mode.\n"); printk("Caused by (from SRR1=%lx): ", reason); switch (reason & 0x601F0000) { case 0x80000: printk("Machine check signal\n"); break; case 0: /* for 601 */ case 0x40000: case 0x140000: /* 7450 MSS error and TEA */ printk("Transfer error ack signal\n"); break; case 0x20000: printk("Data parity error signal\n"); break; case 0x10000: printk("Address parity error signal\n"); break; case 0x20000000: printk("L1 Data Cache error\n"); break; case 0x40000000: printk("L1 Instruction Cache error\n"); break; case 0x00100000: printk("L2 data cache parity error\n"); break; default: printk("Unknown values in msr\n"); } #endif /* CONFIG_4xx */ /* * Optional platform-provided routine to print out * additional info, e.g. bus error registers. 
*/ platform_machine_check(regs); if (debugger_fault_handler(regs)) return; die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable Machine check"); } void SMIException(struct pt_regs *regs) { die("System Management Interrupt", regs, SIGABRT); } void unknown_exception(struct pt_regs *regs) { printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, 0, 0); } void instruction_breakpoint_exception(struct pt_regs *regs) { if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (debugger_iabr_match(regs)) return; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); } void RunModeException(struct pt_regs *regs) { _exception(SIGTRAP, regs, 0, 0); } void __kprobes single_step_exception(struct pt_regs *regs) { regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (debugger_sstep(regs)) return; _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); } /* * After we have successfully emulated an instruction, we have to * check if the instruction was being single-stepped, and if so, * pretend we got a single-step exception. This was pointed out * by Kumar Gala. 
-- paulus */ static void emulate_single_step(struct pt_regs *regs) { if (single_stepping(regs)) { clear_single_step(regs); _exception(SIGTRAP, regs, TRAP_TRACE, 0); } } static void parse_fpe(struct pt_regs *regs) { int code = 0; unsigned long fpscr; flush_fp_to_thread(current); fpscr = current->thread.fpscr.val; /* Invalid operation */ if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) code = FPE_FLTINV; /* Overflow */ else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) code = FPE_FLTOVF; /* Underflow */ else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) code = FPE_FLTUND; /* Divide by zero */ else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) code = FPE_FLTDIV; /* Inexact result */ else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) code = FPE_FLTRES; _exception(SIGFPE, regs, code, regs->nip); } /* * Illegal instruction emulation support. Originally written to * provide the PVR to user applications using the mfspr rd, PVR. * Return non-zero if we can't emulate, or -EFAULT if the associated * memory access caused an access fault. Return zero on success. * * There are a couple of ways to do this, either "decode" the instruction * or directly match lots of bits. In this case, matching lots of * bits is faster and easier. 
* */ #define INST_MFSPR_PVR 0x7c1f42a6 #define INST_MFSPR_PVR_MASK 0xfc1fffff #define INST_DCBA 0x7c0005ec #define INST_DCBA_MASK 0xfc0007fe #define INST_MCRXR 0x7c000400 #define INST_MCRXR_MASK 0xfc0007fe #define INST_STRING 0x7c00042a #define INST_STRING_MASK 0xfc0007fe #define INST_STRING_GEN_MASK 0xfc00067e #define INST_LSWI 0x7c0004aa #define INST_LSWX 0x7c00042a #define INST_STSWI 0x7c0005aa #define INST_STSWX 0x7c00052a #define INST_POPCNTB 0x7c0000f4 #define INST_POPCNTB_MASK 0xfc0007fe static int emulate_string_inst(struct pt_regs *regs, u32 instword) { u8 rT = (instword >> 21) & 0x1f; u8 rA = (instword >> 16) & 0x1f; u8 NB_RB = (instword >> 11) & 0x1f; u32 num_bytes; unsigned long EA; int pos = 0; /* Early out if we are an invalid form of lswx */ if ((instword & INST_STRING_MASK) == INST_LSWX) if ((rT == rA) || (rT == NB_RB)) return -EINVAL; EA = (rA == 0) ? 0 : regs->gpr[rA]; switch (instword & INST_STRING_MASK) { case INST_LSWX: case INST_STSWX: EA += NB_RB; num_bytes = regs->xer & 0x7f; break; case INST_LSWI: case INST_STSWI: num_bytes = (NB_RB == 0) ? 
32 : NB_RB; break; default: return -EINVAL; } while (num_bytes != 0) { u8 val; u32 shift = 8 * (3 - (pos & 0x3)); switch ((instword & INST_STRING_MASK)) { case INST_LSWX: case INST_LSWI: if (get_user(val, (u8 __user *)EA)) return -EFAULT; /* first time updating this reg, * zero it out */ if (pos == 0) regs->gpr[rT] = 0; regs->gpr[rT] |= val << shift; break; case INST_STSWI: case INST_STSWX: val = regs->gpr[rT] >> shift; if (put_user(val, (u8 __user *)EA)) return -EFAULT; break; } /* move EA to next address */ EA += 1; num_bytes--; /* manage our position within the register */ if (++pos == 4) { pos = 0; if (++rT == 32) rT = 0; } } return 0; } static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) { u32 ra,rs; unsigned long tmp; ra = (instword >> 16) & 0x1f; rs = (instword >> 21) & 0x1f; tmp = regs->gpr[rs]; tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; regs->gpr[ra] = tmp; return 0; } static int emulate_instruction(struct pt_regs *regs) { u32 instword; u32 rd; if (!user_mode(regs) || (regs->msr & MSR_LE)) return -EINVAL; CHECK_FULL_REGS(regs); if (get_user(instword, (u32 __user *)(regs->nip))) return -EFAULT; /* Emulate the mfspr rD, PVR. */ if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) { rd = (instword >> 21) & 0x1f; regs->gpr[rd] = mfspr(SPRN_PVR); return 0; } /* Emulating the dcba insn is just a no-op. */ if ((instword & INST_DCBA_MASK) == INST_DCBA) return 0; /* Emulate the mcrxr insn. */ if ((instword & INST_MCRXR_MASK) == INST_MCRXR) { int shift = (instword >> 21) & 0x1c; unsigned long msk = 0xf0000000UL >> shift; regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); regs->xer &= ~0xf0000000UL; return 0; } /* Emulate load/store string insn. */ if ((instword & INST_STRING_GEN_MASK) == INST_STRING) return emulate_string_inst(regs, instword); /* Emulate the popcntb (Population Count Bytes) instruction. 
*/ if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) { return emulate_popcntb_inst(regs, instword); } return -EINVAL; } /* * Look through the list of trap instructions that are used for BUG(), * BUG_ON() and WARN_ON() and see if we hit one. At this point we know * that the exception was caused by a trap instruction of some kind. * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0 * otherwise. */ extern struct bug_entry __start___bug_table[], __stop___bug_table[]; #ifndef CONFIG_MODULES #define module_find_bug(x) NULL #endif struct bug_entry *find_bug(unsigned long bugaddr) { struct bug_entry *bug; for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) if (bugaddr == bug->bug_addr) return bug; return module_find_bug(bugaddr); } static int check_bug_trap(struct pt_regs *regs) { struct bug_entry *bug; unsigned long addr; if (regs->msr & MSR_PR) return 0; /* not in kernel */ addr = regs->nip; /* address of trap instruction */ if (addr < PAGE_OFFSET) return 0; bug = find_bug(regs->nip); if (bug == NULL) return 0; if (bug->line & BUG_WARNING_TRAP) { /* this is a WARN_ON rather than BUG/BUG_ON */ printk(KERN_ERR "Badness in %s at %s:%ld\n", bug->function, bug->file, bug->line & ~BUG_WARNING_TRAP); dump_stack(); return 1; } printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n", bug->function, bug->file, bug->line); return 0; } void __kprobes program_check_exception(struct pt_regs *regs) { unsigned int reason = get_reason(regs); extern int do_mathemu(struct pt_regs *regs); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, * but there seems to be a hardware bug on the 405GP (RevD) * that means ESR is sometimes set incorrectly - either to * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a * pattern to occurences etc. 
-dgibson 31/Mar/2003 */ if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) { emulate_single_step(regs); return; } #endif /* CONFIG_MATH_EMULATION */ if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); return; } if (reason & REASON_TRAP) { /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; if (debugger_bpt(regs)) return; if (check_bug_trap(regs)) { regs->nip += 4; return; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); return; } local_irq_enable(); /* Try to emulate it if we should. */ if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { switch (emulate_instruction(regs)) { case 0: regs->nip += 4; emulate_single_step(regs); return; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); return; } } if (reason & REASON_PRIVILEGED) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } void alignment_exception(struct pt_regs *regs) { int fixed = 0; /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); if (fixed == 1) { regs->nip += 4; /* skip over emulated instruction */ emulate_single_step(regs); return; } /* Operand address was bad */ if (fixed == -EFAULT) { if (user_mode(regs)) _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); else /* Search exception table */ bad_page_fault(regs, regs->dar, SIGSEGV); return; } _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); } void StackOverflow(struct pt_regs *regs) { printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", current, regs->gpr[1]); debugger(regs); show_regs(regs); panic("kernel stack overflow"); } void nonrecoverable_exception(struct pt_regs *regs) { printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", regs->nip, regs->msr); debugger(regs); die("nonrecoverable exception", regs, SIGKILL); } void trace_syscall(struct pt_regs *regs) { printk("Task: %p(%d), PC: %08lX/%08lX, 
Syscall: %3ld, Result: %s%ld %s\n", current, current->pid, regs->nip, regs->link, regs->gpr[0], regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); } void kernel_fp_unavailable_exception(struct pt_regs *regs) { printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); } void altivec_unavailable_exception(struct pt_regs *regs) { #if !defined(CONFIG_ALTIVEC) if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); return; } #endif printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); } void performance_monitor_exception(struct pt_regs *regs) { perf_irq(regs); } #ifdef CONFIG_8xx void SoftwareEmulation(struct pt_regs *regs) { extern int do_mathemu(struct pt_regs *); extern int Soft_emulate_8xx(struct pt_regs *); int errcode; CHECK_FULL_REGS(regs); if (!user_mode(regs)) { debugger(regs); die("Kernel Mode Software FPU Emulation", regs, SIGFPE); } #ifdef CONFIG_MATH_EMULATION errcode = do_mathemu(regs); #else errcode = Soft_emulate_8xx(regs); #endif if (errcode) { if (errcode > 0) _exception(SIGFPE, regs, 0, 0); else if (errcode == -EFAULT) _exception(SIGSEGV, regs, 0, 0); else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } else emulate_single_step(regs); } #endif /* CONFIG_8xx */ #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) void DebugException(struct pt_regs *regs, unsigned long debug_status) { if (debug_status & DBSR_IC) { /* instruction completion */ regs->msr &= ~MSR_DE; if (user_mode(regs)) { current->thread.dbcr0 &= ~DBCR0_IC; } else { /* Disable instruction completion */ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); /* Clear the instruction completion event */ mtspr(SPRN_DBSR, DBSR_IC); if 
(debugger_sstep(regs)) return; } _exception(SIGTRAP, regs, TRAP_TRACE, 0); } } #endif /* CONFIG_4xx || CONFIG_BOOKE */ #if !defined(CONFIG_TAU_INT) void TAUException(struct pt_regs *regs) { printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", regs->nip, regs->msr, regs->trap, print_tainted()); } #endif /* CONFIG_INT_TAU */ #ifdef CONFIG_ALTIVEC void altivec_assist_exception(struct pt_regs *regs) { int err; if (!user_mode(regs)) { printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" " at %lx\n", regs->nip); die("Kernel VMX/Altivec assist exception", regs, SIGILL); } flush_altivec_to_thread(current); err = emulate_altivec(regs); if (err == 0) { regs->nip += 4; /* skip emulated instruction */ emulate_single_step(regs); return; } if (err == -EFAULT) { /* got an error reading the instruction */ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); } else { /* didn't recognize the instruction */ /* XXX quick hack for now: set the non-Java bit in the VSCR */ if (printk_ratelimit()) printk(KERN_ERR "Unrecognized altivec instruction " "in %s at %lx\n", current->comm, regs->nip); current->thread.vscr.u[3] |= 0x10000; } } #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_FSL_BOOKE void CacheLockingException(struct pt_regs *regs, unsigned long address, unsigned long error_code) { /* We treat cache locking instructions from the user * as priv ops, in the future we could try to do * something smarter */ if (error_code & (ESR_DLK|ESR_ILK)) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); return; } #endif /* CONFIG_FSL_BOOKE */ #ifdef CONFIG_SPE void SPEFloatingPointException(struct pt_regs *regs) { unsigned long spefscr; int fpexc_mode; int code = 0; spefscr = current->thread.spefscr; fpexc_mode = current->thread.fpexc_mode; /* Hardware does not neccessarily set sticky * underflow/overflow/invalid flags */ if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { code = FPE_FLTOVF; spefscr |= SPEFSCR_FOVFS; } else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & 
PR_FP_EXC_UND)) { code = FPE_FLTUND; spefscr |= SPEFSCR_FUNFS; } else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) code = FPE_FLTDIV; else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { code = FPE_FLTINV; spefscr |= SPEFSCR_FINVS; } else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) code = FPE_FLTRES; current->thread.spefscr = spefscr; _exception(SIGFPE, regs, code, regs->nip); return; } #endif /* * We enter here if we get an unrecoverable exception, that is, one * that happened at a point where the RI (recoverable interrupt) bit * in the MSR is 0. This indicates that SRR0/1 are live, and that * we therefore lost state by taking this exception. */ void unrecoverable_exception(struct pt_regs *regs) { printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable exception", regs, SIGABRT); } #ifdef CONFIG_BOOKE_WDT /* * Default handler for a Watchdog exception, * spins until a reboot occurs */ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) { /* Generic WatchdogHandler, implement your own */ mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); return; } void WatchdogException(struct pt_regs *regs) { printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); WatchdogHandler(regs); } #endif /* * We enter here if we discover during exception entry that we are * running in supervisor mode with a userspace value in the stack pointer. */ void kernel_bad_stack(struct pt_regs *regs) { printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", regs->gpr[1], regs->nip); die("Bad kernel stack pointer", regs, SIGABRT); } void __init trap_init(void) { }
./CrossVul/dataset_final_sorted/CWE-19/c/bad_2484_0
crossvul-cpp_data_good_1453_2
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_attr.h" #include "xfs_attr_sf.h" #include "xfs_attr_remote.h" #include "xfs_attr_leaf.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_buf_item.h" #include "xfs_cksum.h" #include "xfs_dinode.h" #include "xfs_dir2.h" STATIC int xfs_attr_shortform_compare(const void *a, const void *b) { xfs_attr_sf_sort_t *sa, *sb; sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; if (sa->hash < sb->hash) { return(-1); } else if (sa->hash > sb->hash) { return(1); } else { return(sa->entno - sb->entno); } } #define XFS_ISRESET_CURSOR(cursor) \ (!((cursor)->initted) && !((cursor)->hashval) && \ !((cursor)->blkno) && !((cursor)->offset)) /* * Copy out entries of shortform attribute lists for attr_list(). * Shortform attribute lists are not stored in hashval sorted order. 
* If the output buffer is not large enough to hold them all, then we * we have to calculate each entries' hashvalue and sort them before * we can begin returning them to the user. */ int xfs_attr_shortform_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_sf_sort_t *sbuf, *sbp; xfs_attr_shortform_t *sf; xfs_attr_sf_entry_t *sfe; xfs_inode_t *dp; int sbsize, nsbuf, count, i; int error; ASSERT(context != NULL); dp = context->dp; ASSERT(dp != NULL); ASSERT(dp->i_afp != NULL); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; ASSERT(sf != NULL); if (!sf->hdr.count) return(0); cursor = context->cursor; ASSERT(cursor != NULL); trace_xfs_attr_list_sf(context); /* * If the buffer is large enough and the cursor is at the start, * do not bother with sorting since we will return everything in * one buffer and another call using the cursor won't need to be * made. * Note the generous fudge factor of 16 overhead bytes per entry. * If bufsize is zero then put_listent must be a search function * and can just scan through what we have. */ if (context->bufsize == 0 || (XFS_ISRESET_CURSOR(cursor) && (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { error = context->put_listent(context, sfe->flags, sfe->nameval, (int)sfe->namelen, (int)sfe->valuelen, &sfe->nameval[sfe->namelen]); /* * Either search callback finished early or * didn't fit it all in the buffer after all. */ if (context->seen_enough) break; if (error) return error; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } trace_xfs_attr_list_sf_all(context); return(0); } /* do no more for a search callback */ if (context->bufsize == 0) return 0; /* * It didn't all fit, so we have to sort everything on hashval. */ sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); /* * Scan the attribute list for the rest of the entries, storing * the relevant info from only those that match into a buffer. 
*/ nsbuf = 0; for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, sfe); kmem_free(sbuf); return XFS_ERROR(EFSCORRUPTED); } sbp->entno = i; sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen); sbp->name = sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ sbp->valuelen = sfe->valuelen; sbp->flags = sfe->flags; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); sbp++; nsbuf++; } /* * Sort the entries on hash then entno. */ xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); /* * Re-find our place IN THE SORTED LIST. */ count = 0; cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; } else if (sbp->hash > cursor->hashval) { break; } } if (i == nsbuf) { kmem_free(sbuf); return(0); } /* * Loop putting entries into the user buffer. */ for ( ; i < nsbuf; i++, sbp++) { if (cursor->hashval != sbp->hash) { cursor->hashval = sbp->hash; cursor->offset = 0; } error = context->put_listent(context, sbp->flags, sbp->name, sbp->namelen, sbp->valuelen, &sbp->name[sbp->namelen]); if (error) return error; if (context->seen_enough) break; cursor->offset++; } kmem_free(sbuf); return(0); } STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_leafblock_t *leaf; xfs_da_intnode_t *node; struct xfs_attr3_icleaf_hdr leafhdr; struct xfs_da3_icnode_hdr nodehdr; struct xfs_da_node_entry *btree; int error, i; struct xfs_buf *bp; struct xfs_inode *dp = context->dp; trace_xfs_attr_node_list(context); cursor = context->cursor; cursor->initted = 1; /* * Do all sorts of validation on the passed-in cursor structure. 
* If anything is amiss, ignore the cursor and look up the hashval * starting from the btree root. */ bp = NULL; if (cursor->blkno > 0) { error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if ((error != 0) && (error != EFSCORRUPTED)) return(error); if (bp) { struct xfs_attr_leaf_entry *entries; node = bp->b_addr; switch (be16_to_cpu(node->hdr.info.magic)) { case XFS_DA_NODE_MAGIC: case XFS_DA3_NODE_MAGIC: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; break; case XFS_ATTR_LEAF_MAGIC: case XFS_ATTR3_LEAF_MAGIC: leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); if (cursor->hashval > be32_to_cpu( entries[leafhdr.count - 1].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= be32_to_cpu( entries[0].hashval)) { trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } break; default: trace_xfs_attr_list_wrong_blk(context); xfs_trans_brelse(NULL, bp); bp = NULL; } } } /* * We did not find what we expected given the cursor's contents, * so we start from the top and work down based on the hash value. * Note that start of node block is same as start of leaf block. 
*/ if (bp == NULL) { cursor->blkno = 0; for (;;) { __uint16_t magic; error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); if (error) return(error); node = bp->b_addr; magic = be16_to_cpu(node->hdr.info.magic); if (magic == XFS_ATTR_LEAF_MAGIC || magic == XFS_ATTR3_LEAF_MAGIC) break; if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, context->dp->i_mount, node); xfs_trans_brelse(NULL, bp); return XFS_ERROR(EFSCORRUPTED); } dp->d_ops->node_hdr_from_disk(&nodehdr, node); btree = dp->d_ops->node_tree_p(node); for (i = 0; i < nodehdr.count; btree++, i++) { if (cursor->hashval <= be32_to_cpu(btree->hashval)) { cursor->blkno = be32_to_cpu(btree->before); trace_xfs_attr_list_node_descend(context, btree); break; } } if (i == nodehdr.count) { xfs_trans_brelse(NULL, bp); return 0; } xfs_trans_brelse(NULL, bp); } } ASSERT(bp != NULL); /* * Roll upward through the blocks, processing each leaf block in * order. As long as there is space in the result buffer, keep * adding the information. */ for (;;) { leaf = bp->b_addr; error = xfs_attr3_leaf_list_int(bp, context); if (error) { xfs_trans_brelse(NULL, bp); return error; } xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf); if (context->seen_enough || leafhdr.forw == 0) break; cursor->blkno = leafhdr.forw; xfs_trans_brelse(NULL, bp); error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp); if (error) return error; } xfs_trans_brelse(NULL, bp); return 0; } /* * Copy out attribute list entries for attr_list(), for leaf attribute lists. 
*/ int xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) { struct attrlist_cursor_kern *cursor; struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entries; struct xfs_attr_leaf_entry *entry; int retval; int i; trace_xfs_attr_list_leaf(context); leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); cursor = context->cursor; cursor->initted = 1; /* * Re-find our place in the leaf block if this is a new syscall. */ if (context->resynch) { entry = &entries[0]; for (i = 0; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; } else if (be32_to_cpu(entry->hashval) > cursor->hashval) { context->dupcnt = 0; break; } } if (i == ichdr.count) { trace_xfs_attr_list_notfound(context); return 0; } } else { entry = &entries[0]; i = 0; } context->resynch = 0; /* * We have found our place, start copying out the new attributes. 
*/ retval = 0; for (; i < ichdr.count; entry++, i++) { if (be32_to_cpu(entry->hashval) != cursor->hashval) { cursor->hashval = be32_to_cpu(entry->hashval); cursor->offset = 0; } if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* skip incomplete entries */ if (entry->flags & XFS_ATTR_LOCAL) { xfs_attr_leaf_name_local_t *name_loc = xfs_attr3_leaf_name_local(leaf, i); retval = context->put_listent(context, entry->flags, name_loc->nameval, (int)name_loc->namelen, be16_to_cpu(name_loc->valuelen), &name_loc->nameval[name_loc->namelen]); if (retval) return retval; } else { xfs_attr_leaf_name_remote_t *name_rmt = xfs_attr3_leaf_name_remote(leaf, i); int valuelen = be32_to_cpu(name_rmt->valuelen); if (context->put_value) { xfs_da_args_t args; memset((char *)&args, 0, sizeof(args)); args.dp = context->dp; args.whichfork = XFS_ATTR_FORK; args.valuelen = valuelen; args.rmtvaluelen = valuelen; args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); args.rmtblkno = be32_to_cpu(name_rmt->valueblk); args.rmtblkcnt = xfs_attr3_rmt_blocks( args.dp->i_mount, valuelen); retval = xfs_attr_rmtval_get(&args); if (retval) return retval; retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, args.value); kmem_free(args.value); } else { retval = context->put_listent(context, entry->flags, name_rmt->name, (int)name_rmt->namelen, valuelen, NULL); } if (retval) return retval; } if (context->seen_enough) break; cursor->offset++; } trace_xfs_attr_list_leaf_end(context); return retval; } /* * Copy out attribute entries for attr_list(), for leaf attribute lists. 
*/ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context) { int error; struct xfs_buf *bp; trace_xfs_attr_leaf_list(context); context->cursor->blkno = 0; error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); if (error) return XFS_ERROR(error); error = xfs_attr3_leaf_list_int(bp, context); xfs_trans_brelse(NULL, bp); return XFS_ERROR(error); } int xfs_attr_list_int( xfs_attr_list_context_t *context) { int error; xfs_inode_t *dp = context->dp; uint lock_mode; XFS_STATS_INC(xs_attr_list); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return EIO; /* * Decide on what work routines to call based on the inode size. */ lock_mode = xfs_ilock_attr_map_shared(dp); if (!xfs_inode_hasattr(dp)) { error = 0; } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { error = xfs_attr_shortform_list(context); } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { error = xfs_attr_leaf_list(context); } else { error = xfs_attr_node_list(context); } xfs_iunlock(dp, lock_mode); return error; } #define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \ (((struct attrlist_ent *) 0)->a_name - (char *) 0) #define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \ ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \ & ~(sizeof(u_int32_t)-1)) /* * Format an attribute and copy it out to the user's buffer. * Take care to check values and protect against them changing later, * we may be reading them directly out of a user buffer. */ STATIC int xfs_attr_put_listent( xfs_attr_list_context_t *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) { struct attrlist *alist = (struct attrlist *)context->alist; attrlist_ent_t *aep; int arraytop; ASSERT(!(context->flags & ATTR_KERNOVAL)); ASSERT(context->count >= 0); ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); ASSERT(context->firstu >= sizeof(*alist)); ASSERT(context->firstu <= context->bufsize); /* * Only list entries in the right namespace. 
*/ if (((context->flags & ATTR_SECURE) == 0) != ((flags & XFS_ATTR_SECURE) == 0)) return 0; if (((context->flags & ATTR_ROOT) == 0) != ((flags & XFS_ATTR_ROOT) == 0)) return 0; arraytop = sizeof(*alist) + context->count * sizeof(alist->al_offset[0]); context->firstu -= ATTR_ENTSIZE(namelen); if (context->firstu < arraytop) { trace_xfs_attr_list_full(context); alist->al_more = 1; context->seen_enough = 1; return 1; } aep = (attrlist_ent_t *)&context->alist[context->firstu]; aep->a_valuelen = valuelen; memcpy(aep->a_name, name, namelen); aep->a_name[namelen] = 0; alist->al_offset[context->count++] = context->firstu; alist->al_count = context->count; trace_xfs_attr_list_add(context); return 0; } /* * Generate a list of extended attribute names and optionally * also value lengths. Positive return value follows the XFS * convention of being an error, zero or negative return code * is the length of the buffer returned (negated), indicating * success. */ int xfs_attr_list( xfs_inode_t *dp, char *buffer, int bufsize, int flags, attrlist_cursor_kern_t *cursor) { xfs_attr_list_context_t context; struct attrlist *alist; int error; /* * Validate the cursor. */ if (cursor->pad1 || cursor->pad2) return(XFS_ERROR(EINVAL)); if ((cursor->initted == 0) && (cursor->hashval || cursor->blkno || cursor->offset)) return XFS_ERROR(EINVAL); /* * Check for a properly aligned buffer. */ if (((long)buffer) & (sizeof(int)-1)) return XFS_ERROR(EFAULT); if (flags & ATTR_KERNOVAL) bufsize = 0; /* * Initialize the output buffer. 
*/ memset(&context, 0, sizeof(context)); context.dp = dp; context.cursor = cursor; context.resynch = 1; context.flags = flags; context.alist = buffer; context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ context.firstu = context.bufsize; context.put_listent = xfs_attr_put_listent; alist = (struct attrlist *)context.alist; alist->al_count = 0; alist->al_more = 0; alist->al_offset[0] = context.bufsize; error = xfs_attr_list_int(&context); ASSERT(error >= 0); return error; }
./CrossVul/dataset_final_sorted/CWE-19/c/good_1453_2
crossvul-cpp_data_good_1842_2
/* * linux/fs/ext4/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>. * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>. * Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz * and Andreas Gruenbacher <agruen@suse.de>. */ /* * Extended attributes are stored directly in inodes (on file systems with * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl * field contains the block number if an inode uses an additional block. All * attributes must fit in the inode and one additional block. Blocks that * contain the identical set of attributes may be shared among several inodes. * Identical blocks are detected by keeping a cache of blocks that have * recently been accessed. * * The attributes in inodes and on blocks have a different header; the entries * are stored in the same format: * * +------------------+ * | header | * | entry 1 | | * | entry 2 | | growing downwards * | entry 3 | v * | four null bytes | * | . . . | * | value 1 | ^ * | value 3 | | growing upwards * | value 2 | | * +------------------+ * * The header is followed by multiple entry descriptors. In disk blocks, the * entry descriptors are kept sorted. In inodes, they are unsorted. The * attribute values are aligned to the end of the block in no specific order. * * Locking strategy * ---------------- * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem. * EA blocks are only changed if they are exclusive to an inode, so * holding xattr_sem also means that nothing but the EA block's reference * count can change. Multiple writers to the same block are synchronized * by the buffer lock. 
*/ #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/mbcache2.h> #include <linux/quotaops.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" #include "acl.h" #ifdef EXT4_XATTR_DEBUG # define ea_idebug(inode, f...) do { \ printk(KERN_DEBUG "inode %s:%lu: ", \ inode->i_sb->s_id, inode->i_ino); \ printk(f); \ printk("\n"); \ } while (0) # define ea_bdebug(bh, f...) do { \ printk(KERN_DEBUG "block %pg:%lu: ", \ bh->b_bdev, (unsigned long) bh->b_blocknr); \ printk(f); \ printk("\n"); \ } while (0) #else # define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__) # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__) #endif static void ext4_xattr_cache_insert(struct mb2_cache *, struct buffer_head *); static struct buffer_head *ext4_xattr_cache_find(struct inode *, struct ext4_xattr_header *, struct mb2_cache_entry **); static void ext4_xattr_rehash(struct ext4_xattr_header *, struct ext4_xattr_entry *); static int ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size); static const struct xattr_handler *ext4_xattr_handler_map[] = { [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler, #ifdef CONFIG_EXT4_FS_POSIX_ACL [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler, #endif [EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_SECURITY [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, #endif }; const struct xattr_handler *ext4_xattr_handlers[] = { &ext4_xattr_user_handler, &ext4_xattr_trusted_handler, #ifdef CONFIG_EXT4_FS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif #ifdef CONFIG_EXT4_FS_SECURITY &ext4_xattr_security_handler, #endif NULL }; #define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \ inode->i_sb->s_fs_info)->s_mb_cache) static __le32 ext4_xattr_block_csum(struct inode *inode, sector_t block_nr, struct 
ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le32 save_csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);

	/*
	 * The checksum covers the little-endian block number followed by the
	 * whole xattr block.  The on-disk checksum field is zeroed for the
	 * duration of the computation (so it does not feed into itself) and
	 * restored afterwards.
	 */
	save_csum = hdr->h_checksum;
	hdr->h_checksum = 0;
	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
			   EXT4_BLOCK_SIZE(inode->i_sb));
	hdr->h_checksum = save_csum;
	return cpu_to_le32(csum);
}

/*
 * Verify the checksum of an xattr block.  Returns 1 when the stored
 * checksum matches (or when metadata checksums are disabled on this
 * filesystem), 0 on mismatch.
 */
static int ext4_xattr_block_csum_verify(struct inode *inode,
					sector_t block_nr,
					struct ext4_xattr_header *hdr)
{
	if (ext4_has_metadata_csum(inode->i_sb) &&
	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
		return 0;
	return 1;
}

/* Recompute and store the block checksum; no-op without metadata_csum. */
static void ext4_xattr_block_csum_set(struct inode *inode,
				      sector_t block_nr,
				      struct ext4_xattr_header *hdr)
{
	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
}

/* Refresh the block checksum, then mark the buffer dirty in the journal. */
static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
						struct inode *inode,
						struct buffer_head *bh)
{
	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

/*
 * Map an on-disk name index to its xattr handler; NULL when the index is
 * out of range or the handler is not compiled in.
 */
static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext4_xattr_list(dentry, buffer, size);
}

/*
 * Validate an array of xattr entries starting at 'entry'.
 * Returns 0 when the layout is sane, -EFSCORRUPTED otherwise.
 */
static int
ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
		       void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* First pass: every entry descriptor must lie strictly before 'end'. */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		e = next;
	}

	/*
	 * Second pass: each value region (value_start + e_value_offs, for
	 * e_value_size bytes) must fall between the end of the entry table
	 * ('e' now points at the terminator; sizeof(__u32) accounts for the
	 * four NUL bytes) and 'end', so a value can never overlap an entry.
	 */
	while (!IS_LAST_ENTRY(entry)) {
		if (entry->e_value_size != 0 &&
		    (value_start + le16_to_cpu(entry->e_value_offs) <
		     (void *)e + sizeof(__u32) ||
		     value_start + le16_to_cpu(entry->e_value_offs) +
		    le32_to_cpu(entry->e_value_size) > end))
			return -EFSCORRUPTED;
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

/*
 * Validate an xattr block: magic number, block count, checksum, and entry
 * layout.  A successful check is cached via the buffer's 'verified' bit so
 * repeated lookups on the same buffer skip the work.
 */
static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EFSCORRUPTED;
	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
		return -EFSBADCRC;
	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
				       bh->b_data);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

/* Check that a single entry's value fits within a region of 'size' bytes. */
static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
	size_t value_size = le32_to_cpu(entry->e_value_size);

	if (entry->e_value_block != 0 || value_size > size ||
	    le16_to_cpu(entry->e_value_offs) + value_size > size)
		return -EFSCORRUPTED;
	return 0;
}

/*
 * Find the entry matching (name_index, name) starting at *pentry.
 * 'sorted' means the entries are kept in comparison order (as in xattr
 * blocks), allowing the scan to stop early; in-inode entries are unsorted.
 * On return *pentry points at the match or the insertion point.
 */
static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, size_t size, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		/* Order: namespace index, then name length, then name bytes. */
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	if (!cmp && ext4_xattr_check_entry(entry, size))
		return -EFSCORRUPTED;
	return cmp ?
-ENODATA : 0;
}

/*
 * ext4_xattr_block_get()
 *
 * Look up (name_index, name) in the inode's external xattr block
 * (i_file_acl).  Copies the value into 'buffer' when one is supplied and
 * returns the value size; with a NULL buffer only the size is returned.
 * Returns -ENODATA when absent, -ERANGE when the buffer is too small,
 * -EFSCORRUPTED on a bad block.
 */
static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;
	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
bad_block:
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	/* Make this block findable by the mbcache for xattr-block sharing. */
	ext4_xattr_cache_insert(ext4_mb_cache, bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
	if (error == -EFSCORRUPTED)
		goto bad_block;
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

/*
 * ext4_xattr_ibody_get()
 *
 * Same lookup as above, but against the xattr area stored in the inode
 * body itself.  Validates the in-inode entry list before searching it.
 */
int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(entry, end, entry);
	if (error)
		goto cleanup;
	error =
ext4_xattr_find_entry(&entry, name_index, name, end - (void *)entry, 0); if (error) goto cleanup; size = le32_to_cpu(entry->e_value_size); if (buffer) { error = -ERANGE; if (size > buffer_size) goto cleanup; memcpy(buffer, (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), size); } error = size; cleanup: brelse(iloc.bh); return error; } /* * ext4_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ int ext4_xattr_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size) { int error; if (strlen(name) > 255) return -ERANGE; down_read(&EXT4_I(inode)->xattr_sem); error = ext4_xattr_ibody_get(inode, name_index, name, buffer, buffer_size); if (error == -ENODATA) error = ext4_xattr_block_get(inode, name_index, name, buffer, buffer_size); up_read(&EXT4_I(inode)->xattr_sem); return error; } static int ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry, char *buffer, size_t buffer_size) { size_t rest = buffer_size; for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { const struct xattr_handler *handler = ext4_xattr_handler(entry->e_name_index); if (handler && (!handler->list || handler->list(dentry))) { const char *prefix = handler->prefix ?: handler->name; size_t prefix_len = strlen(prefix); size_t size = prefix_len + entry->e_name_len + 1; if (buffer) { if (size > rest) return -ERANGE; memcpy(buffer, prefix, prefix_len); buffer += prefix_len; memcpy(buffer, entry->e_name, entry->e_name_len); buffer += entry->e_name_len; *buffer++ = 0; } rest -= size; } } return buffer_size - rest; /* total size */ } static int ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); struct buffer_head *bh = NULL; int error; struct 
mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); ea_idebug(inode, "buffer=%p, buffer_size=%ld", buffer, (long)buffer_size); error = 0; if (!EXT4_I(inode)->i_file_acl) goto cleanup; ea_idebug(inode, "reading block %llu", (unsigned long long)EXT4_I(inode)->i_file_acl); bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bh) goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext4_xattr_check_block(inode, bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EFSCORRUPTED; goto cleanup; } ext4_xattr_cache_insert(ext4_mb_cache, bh); error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size); cleanup: brelse(bh); return error; } static int ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; struct ext4_iloc iloc; void *end; int error; if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR)) return 0; error = ext4_get_inode_loc(inode, &iloc); if (error) return error; raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); if (error) goto cleanup; error = ext4_xattr_list_entries(dentry, IFIRST(header), buffer, buffer_size); cleanup: brelse(iloc.bh); return error; } /* * ext4_xattr_list() * * Copy a list of attribute names into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. 
*/ static int ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { int ret, ret2; down_read(&EXT4_I(d_inode(dentry))->xattr_sem); ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; if (buffer) { buffer += ret; buffer_size -= ret; } ret = ext4_xattr_block_list(dentry, buffer, buffer_size); if (ret < 0) goto errout; ret += ret2; errout: up_read(&EXT4_I(d_inode(dentry))->xattr_sem); return ret; } /* * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is * not set, set it. */ static void ext4_xattr_update_super_block(handle_t *handle, struct super_block *sb) { if (ext4_has_feature_xattr(sb)) return; BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access"); if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) { ext4_set_feature_xattr(sb); ext4_handle_dirty_super(handle, sb); } } /* * Release the xattr block BH: If the reference count is > 1, decrement it; * otherwise free the block. */ static void ext4_xattr_release_block(handle_t *handle, struct inode *inode, struct buffer_head *bh) { int error = 0; BUFFER_TRACE(bh, "get_write_access"); error = ext4_journal_get_write_access(handle, bh); if (error) goto out; lock_buffer(bh); if (BHDR(bh)->h_refcount == cpu_to_le32(1)) { __u32 hash = le32_to_cpu(BHDR(bh)->h_hash); ea_bdebug(bh, "refcount now=0; freeing"); /* * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect freed block */ mb2_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash, bh->b_blocknr); get_bh(bh); unlock_buffer(bh); ext4_free_blocks(handle, inode, bh, 0, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } else { le32_add_cpu(&BHDR(bh)->h_refcount, -1); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. 
In * nojournal mode this race is harmless and we actually cannot * call ext4_handle_dirty_xattr_block() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer. */ if (ext4_handle_valid(handle)) error = ext4_handle_dirty_xattr_block(handle, inode, bh); unlock_buffer(bh); if (!ext4_handle_valid(handle)) error = ext4_handle_dirty_xattr_block(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } out: ext4_std_error(inode->i_sb, error); return; } /* * Find the available free space for EAs. This also returns the total number of * bytes used by EA entries. */ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last, size_t *min_offs, void *base, int *total) { for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_block && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < *min_offs) *min_offs = offs; } if (total) *total += EXT4_XATTR_LEN(last->e_name_len); } return (*min_offs - ((void *)last - base) - sizeof(__u32)); } static int ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s) { struct ext4_xattr_entry *last; size_t free, min_offs = s->end - s->base, name_len = strlen(i->name); /* Compute min_offs and last. 
*/ last = s->first; for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { if (!last->e_value_block && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) min_offs = offs; } } free = min_offs - ((void *)last - s->base) - sizeof(__u32); if (!s->not_found) { if (!s->here->e_value_block && s->here->e_value_size) { size_t size = le32_to_cpu(s->here->e_value_size); free += EXT4_XATTR_SIZE(size); } free += EXT4_XATTR_LEN(name_len); } if (i->value) { if (free < EXT4_XATTR_LEN(name_len) + EXT4_XATTR_SIZE(i->value_len)) return -ENOSPC; } if (i->value && s->not_found) { /* Insert the new name. */ size_t size = EXT4_XATTR_LEN(name_len); size_t rest = (void *)last - (void *)s->here + sizeof(__u32); memmove((void *)s->here + size, s->here, rest); memset(s->here, 0, size); s->here->e_name_index = i->name_index; s->here->e_name_len = name_len; memcpy(s->here->e_name, i->name, name_len); } else { if (!s->here->e_value_block && s->here->e_value_size) { void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(s->here->e_value_offs); void *val = s->base + offs; size_t size = EXT4_XATTR_SIZE( le32_to_cpu(s->here->e_value_size)); if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) { /* The old and the new value have the same size. Just replace. */ s->here->e_value_size = cpu_to_le32(i->value_len); if (i->value == EXT4_ZERO_XATTR_VALUE) { memset(val, 0, size); } else { /* Clear pad bytes first. */ memset(val + size - EXT4_XATTR_PAD, 0, EXT4_XATTR_PAD); memcpy(val, i->value, i->value_len); } return 0; } /* Remove the old value. */ memmove(first_val + size, first_val, val - first_val); memset(first_val, 0, size); s->here->e_value_size = 0; s->here->e_value_offs = 0; min_offs += size; /* Adjust all value offsets. 
*/ last = s->first; while (!IS_LAST_ENTRY(last)) { size_t o = le16_to_cpu(last->e_value_offs); if (!last->e_value_block && last->e_value_size && o < offs) last->e_value_offs = cpu_to_le16(o + size); last = EXT4_XATTR_NEXT(last); } } if (!i->value) { /* Remove the old name. */ size_t size = EXT4_XATTR_LEN(name_len); last = ENTRY((void *)last - size); memmove(s->here, (void *)s->here + size, (void *)last - (void *)s->here + sizeof(__u32)); memset(last, 0, size); } } if (i->value) { /* Insert the new value. */ s->here->e_value_size = cpu_to_le32(i->value_len); if (i->value_len) { size_t size = EXT4_XATTR_SIZE(i->value_len); void *val = s->base + min_offs - size; s->here->e_value_offs = cpu_to_le16(min_offs - size); if (i->value == EXT4_ZERO_XATTR_VALUE) { memset(val, 0, size); } else { /* Clear the pad bytes first. */ memset(val + size - EXT4_XATTR_PAD, 0, EXT4_XATTR_PAD); memcpy(val, i->value, i->value_len); } } } return 0; } struct ext4_xattr_block_find { struct ext4_xattr_search s; struct buffer_head *bh; }; static int ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; int error; ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld", i->name_index, i->name, i->value, (long)i->value_len); if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bs->bh) goto cleanup; ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); if (ext4_xattr_check_block(inode, bs->bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EFSCORRUPTED; goto cleanup; } /* Find the named attribute. 
*/ bs->s.base = BHDR(bs->bh); bs->s.first = BFIRST(bs->bh); bs->s.end = bs->bh->b_data + bs->bh->b_size; bs->s.here = bs->s.first; error = ext4_xattr_find_entry(&bs->s.here, i->name_index, i->name, bs->bh->b_size, 1); if (error && error != -ENODATA) goto cleanup; bs->s.not_found = error; } error = 0; cleanup: return error; } static int ext4_xattr_block_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_block_find *bs) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh = NULL; struct ext4_xattr_search *s = &bs->s; struct mb2_cache_entry *ce = NULL; int error = 0; struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); #define header(x) ((struct ext4_xattr_header *)(x)) if (i->value && i->value_len > sb->s_blocksize) return -ENOSPC; if (s->base) { BUFFER_TRACE(bs->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, bs->bh); if (error) goto cleanup; lock_buffer(bs->bh); if (header(s->base)->h_refcount == cpu_to_le32(1)) { __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash); /* * This must happen under buffer lock for * ext4_xattr_block_set() to reliably detect modified * block */ mb2_cache_entry_delete_block(ext4_mb_cache, hash, bs->bh->b_blocknr); ea_bdebug(bs->bh, "modifying in-place"); error = ext4_xattr_set_entry(i, s); if (!error) { if (!IS_LAST_ENTRY(s->first)) ext4_xattr_rehash(header(s->base), s->here); ext4_xattr_cache_insert(ext4_mb_cache, bs->bh); } unlock_buffer(bs->bh); if (error == -EFSCORRUPTED) goto bad_block; if (!error) error = ext4_handle_dirty_xattr_block(handle, inode, bs->bh); if (error) goto cleanup; goto inserted; } else { int offset = (char *)s->here - bs->bh->b_data; unlock_buffer(bs->bh); ea_bdebug(bs->bh, "cloning"); s->base = kmalloc(bs->bh->b_size, GFP_NOFS); error = -ENOMEM; if (s->base == NULL) goto cleanup; memcpy(s->base, BHDR(bs->bh), bs->bh->b_size); s->first = ENTRY(header(s->base)+1); header(s->base)->h_refcount = cpu_to_le32(1); s->here = ENTRY(s->base + 
offset); s->end = s->base + bs->bh->b_size; } } else { /* Allocate a buffer where we construct the new block. */ s->base = kzalloc(sb->s_blocksize, GFP_NOFS); /* assert(header == s->base) */ error = -ENOMEM; if (s->base == NULL) goto cleanup; header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); header(s->base)->h_blocks = cpu_to_le32(1); header(s->base)->h_refcount = cpu_to_le32(1); s->first = ENTRY(header(s->base)+1); s->here = ENTRY(header(s->base)+1); s->end = s->base + sb->s_blocksize; } error = ext4_xattr_set_entry(i, s); if (error == -EFSCORRUPTED) goto bad_block; if (error) goto cleanup; if (!IS_LAST_ENTRY(s->first)) ext4_xattr_rehash(header(s->base), s->here); inserted: if (!IS_LAST_ENTRY(s->first)) { new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce); if (new_bh) { /* We found an identical block in the cache. */ if (new_bh == bs->bh) ea_bdebug(new_bh, "keeping"); else { /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); if (error) goto cleanup; BUFFER_TRACE(new_bh, "get_write_access"); error = ext4_journal_get_write_access(handle, new_bh); if (error) goto cleanup_dquot; lock_buffer(new_bh); /* * We have to be careful about races with * freeing or rehashing of xattr block. Once we * hold buffer lock xattr block's state is * stable so we can check whether the block got * freed / rehashed or not. Since we unhash * mbcache entry under buffer lock when freeing * / rehashing xattr block, checking whether * entry is still hashed is reliable. */ if (hlist_bl_unhashed(&ce->e_hash_list)) { /* * Undo everything and check mbcache * again. 
*/ unlock_buffer(new_bh); dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); brelse(new_bh); mb2_cache_entry_put(ext4_mb_cache, ce); ce = NULL; new_bh = NULL; goto inserted; } le32_add_cpu(&BHDR(new_bh)->h_refcount, 1); ea_bdebug(new_bh, "reusing; refcount now=%d", le32_to_cpu(BHDR(new_bh)->h_refcount)); unlock_buffer(new_bh); error = ext4_handle_dirty_xattr_block(handle, inode, new_bh); if (error) goto cleanup_dquot; } mb2_cache_entry_touch(ext4_mb_cache, ce); mb2_cache_entry_put(ext4_mb_cache, ce); ce = NULL; } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */ ea_bdebug(bs->bh, "keeping this block"); new_bh = bs->bh; get_bh(new_bh); } else { /* We need to allocate a new block */ ext4_fsblk_t goal, block; goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); /* non-extent files can't have physical blocks past 2^32 */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; block = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &error); if (error) goto cleanup; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); ea_idebug(inode, "creating block %llu", (unsigned long long)block); new_bh = sb_getblk(sb, block); if (unlikely(!new_bh)) { error = -ENOMEM; getblk_failed: ext4_free_blocks(handle, inode, NULL, block, 1, EXT4_FREE_BLOCKS_METADATA); goto cleanup; } lock_buffer(new_bh); error = ext4_journal_get_create_access(handle, new_bh); if (error) { unlock_buffer(new_bh); error = -EIO; goto getblk_failed; } memcpy(new_bh->b_data, s->base, new_bh->b_size); set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_cache_insert(ext4_mb_cache, new_bh); error = ext4_handle_dirty_xattr_block(handle, inode, new_bh); if (error) goto cleanup; } } /* Update the inode. */ EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0; /* Drop the previous xattr block. 
*/ if (bs->bh && bs->bh != new_bh) ext4_xattr_release_block(handle, inode, bs->bh); error = 0; cleanup: if (ce) mb2_cache_entry_put(ext4_mb_cache, ce); brelse(new_bh); if (!(bs->bh && s->base == bs->bh->b_data)) kfree(s->base); return error; cleanup_dquot: dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1)); goto cleanup; bad_block: EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); goto cleanup; #undef header } int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_inode *raw_inode; int error; if (EXT4_I(inode)->i_extra_isize == 0) return 0; raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); is->s.here = is->s.first; is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { error = ext4_xattr_check_names(IFIRST(header), is->s.end, IFIRST(header)); if (error) return error; /* Find the named attribute. 
*/ error = ext4_xattr_find_entry(&is->s.here, i->name_index, i->name, is->s.end - (void *)is->s.base, 0); if (error && error != -ENODATA) return error; is->s.not_found = error; } return 0; } int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_search *s = &is->s; int error; if (EXT4_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext4_xattr_set_entry(i, s); if (error) { if (error == -ENOSPC && ext4_has_inline_data(inode)) { error = ext4_try_to_evict_inline_data(handle, inode, EXT4_XATTR_LEN(strlen(i->name) + EXT4_XATTR_SIZE(i->value_len))); if (error) return error; error = ext4_xattr_ibody_find(inode, i, is); if (error) return error; error = ext4_xattr_set_entry(i, s); } if (error) return error; } header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); ext4_set_inode_state(inode, EXT4_STATE_XATTR); } else { header->h_magic = cpu_to_le32(0); ext4_clear_inode_state(inode, EXT4_STATE_XATTR); } return 0; } static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { struct ext4_xattr_ibody_header *header; struct ext4_xattr_search *s = &is->s; int error; if (EXT4_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext4_xattr_set_entry(i, s); if (error) return error; header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); ext4_set_inode_state(inode, EXT4_STATE_XATTR); } else { header->h_magic = cpu_to_le32(0); ext4_clear_inode_state(inode, EXT4_STATE_XATTR); } return 0; } /* * ext4_xattr_set_handle() * * Create, replace or remove an extended attribute for this inode. 
Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. */ int ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { struct ext4_xattr_info i = { .name_index = name_index, .name = name, .value = value, .value_len = value_len, }; struct ext4_xattr_ibody_find is = { .s = { .not_found = -ENODATA, }, }; struct ext4_xattr_block_find bs = { .s = { .not_found = -ENODATA, }, }; unsigned long no_expand; int error; if (!name) return -EINVAL; if (strlen(name) > 255) return -ERANGE; down_write(&EXT4_I(inode)->xattr_sem); no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); error = ext4_reserve_inode_write(handle, inode, &is.iloc); if (error) goto cleanup; if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) { struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); ext4_clear_inode_state(inode, EXT4_STATE_NEW); } error = ext4_xattr_ibody_find(inode, &i, &is); if (error) goto cleanup; if (is.s.not_found) error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; if (is.s.not_found && bs.s.not_found) { error = -ENODATA; if (flags & XATTR_REPLACE) goto cleanup; error = 0; if (!value) goto cleanup; } else { error = -EEXIST; if (flags & XATTR_CREATE) goto cleanup; } if (!value) { if (!is.s.not_found) error = ext4_xattr_ibody_set(handle, inode, &i, &is); else if (!bs.s.not_found) error = ext4_xattr_block_set(handle, inode, &i, &bs); } else { error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (!error && !bs.s.not_found) { i.value = NULL; error = 
ext4_xattr_block_set(handle, inode, &i, &bs); } else if (error == -ENOSPC) { if (EXT4_I(inode)->i_file_acl && !bs.s.base) { error = ext4_xattr_block_find(inode, &i, &bs); if (error) goto cleanup; } error = ext4_xattr_block_set(handle, inode, &i, &bs); if (error) goto cleanup; if (!is.s.not_found) { i.value = NULL; error = ext4_xattr_ibody_set(handle, inode, &i, &is); } } } if (!error) { ext4_xattr_update_super_block(handle, inode->i_sb); inode->i_ctime = ext4_current_time(inode); if (!value) ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); /* * The bh is consumed by ext4_mark_iloc_dirty, even with * error != 0. */ is.iloc.bh = NULL; if (IS_SYNC(inode)) ext4_handle_sync(handle); } cleanup: brelse(is.iloc.bh); brelse(bs.bh); if (no_expand == 0) ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); up_write(&EXT4_I(inode)->xattr_sem); return error; } /* * ext4_xattr_set() * * Like ext4_xattr_set_handle, but start from an inode. This extended * attribute modification is a filesystem transaction by itself. * * Returns 0, or a negative error number on failure. */ int ext4_xattr_set(struct inode *inode, int name_index, const char *name, const void *value, size_t value_len, int flags) { handle_t *handle; int error, retries = 0; int credits = ext4_jbd2_credits_xattr(inode); retry: handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); if (IS_ERR(handle)) { error = PTR_ERR(handle); } else { int error2; error = ext4_xattr_set_handle(handle, inode, name_index, name, value, value_len, flags); error2 = ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; if (error == 0) error = error2; } return error; } /* * Shift the EA entries in the inode to create space for the increased * i_extra_isize. 
 */
/*
 * Shift the in-inode EA entries and patch up their value offsets.
 * @entry:            first xattr entry in the region being moved
 * @value_offs_shift: signed delta to apply to every entry's e_value_offs
 * @to/@from/@n:      memmove() destination, source and byte count
 * @blocksize:        upper bound used only for the sanity BUG_ON below
 *
 * Only entries with an in-inode value (e_value_block == 0 and a non-zero
 * size) carry an offset that needs adjusting; the entry headers themselves
 * are then moved as one contiguous region.
 */
static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
				     int value_offs_shift, void *to,
				     void *from, size_t n, int blocksize)
{
	struct ext4_xattr_entry *last = entry;
	int new_offs;

	/* Adjust the value offsets of the entries */
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			new_offs = le16_to_cpu(last->e_value_offs) +
							value_offs_shift;
			/* A shifted value must still fit inside the region. */
			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
				 > blocksize);
			last->e_value_offs = cpu_to_le16(new_offs);
		}
	}
	/* Shift the entries by n bytes */
	memmove(to, from, n);
}

/*
 * Expand an inode by new_extra_isize bytes when EAs are present.
 * Returns 0 on success or negative error number on failure.
 *
 * Takes and releases EXT4_I(inode)->xattr_sem; on -ENOSPC-like pressure it
 * retries once with the filesystem's s_min_extra_isize (see the retry label
 * and tried_min_extra_isize below, body continues past this chunk).
 */
int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			       struct ext4_inode *raw_inode, handle_t *handle)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry, *last, *first;
	struct buffer_head *bh = NULL;
	struct ext4_xattr_ibody_find *is = NULL;
	struct ext4_xattr_block_find *bs = NULL;
	char *buffer = NULL, *b_entry_name = NULL;
	size_t min_offs, free;
	int total_ino;
	void *base, *start, *end;
	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);

	down_write(&EXT4_I(inode)->xattr_sem);
retry:
	/* Nothing to do if the inode already has enough extra isize. */
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
		up_write(&EXT4_I(inode)->xattr_sem);
		return 0;
	}

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/*
	 * Check if enough free space is available in the inode to shift the
	 * entries ahead by new_extra_isize.
*/ base = start = entry; end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; min_offs = end - base; last = entry; total_ino = sizeof(struct ext4_xattr_ibody_header); free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); if (free >= new_extra_isize) { entry = IFIRST(header); ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize - new_extra_isize, (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize, (void *)header, total_ino, inode->i_sb->s_blocksize); EXT4_I(inode)->i_extra_isize = new_extra_isize; error = 0; goto cleanup; } /* * Enough free space isn't available in the inode, check if * EA block can hold new_extra_isize bytes. */ if (EXT4_I(inode)->i_file_acl) { bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); error = -EIO; if (!bh) goto cleanup; if (ext4_xattr_check_block(inode, bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EFSCORRUPTED; goto cleanup; } base = BHDR(bh); first = BFIRST(bh); end = bh->b_data + bh->b_size; min_offs = end - base; free = ext4_xattr_free_space(first, &min_offs, base, NULL); if (free < new_extra_isize) { if (!tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; brelse(bh); goto retry; } error = -1; goto cleanup; } } else { free = inode->i_sb->s_blocksize; } while (new_extra_isize > 0) { size_t offs, size, entry_size; struct ext4_xattr_entry *small_entry = NULL; struct ext4_xattr_info i = { .value = NULL, .value_len = 0, }; unsigned int total_size; /* EA entry size + value size */ unsigned int shift_bytes; /* No. of bytes to shift EAs by? 
*/ unsigned int min_total_size = ~0U; is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); if (!is || !bs) { error = -ENOMEM; goto cleanup; } is->s.not_found = -ENODATA; bs->s.not_found = -ENODATA; is->iloc.bh = NULL; bs->bh = NULL; last = IFIRST(header); /* Find the entry best suited to be pushed into EA block */ entry = NULL; for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { total_size = EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) + EXT4_XATTR_LEN(last->e_name_len); if (total_size <= free && total_size < min_total_size) { if (total_size < new_extra_isize) { small_entry = last; } else { entry = last; min_total_size = total_size; } } } if (entry == NULL) { if (small_entry) { entry = small_entry; } else { if (!tried_min_extra_isize && s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; kfree(is); is = NULL; kfree(bs); bs = NULL; brelse(bh); goto retry; } error = -1; goto cleanup; } } offs = le16_to_cpu(entry->e_value_offs); size = le32_to_cpu(entry->e_value_size); entry_size = EXT4_XATTR_LEN(entry->e_name_len); i.name_index = entry->e_name_index, buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS); b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); if (!buffer || !b_entry_name) { error = -ENOMEM; goto cleanup; } /* Save the entry name and the entry value */ memcpy(buffer, (void *)IFIRST(header) + offs, EXT4_XATTR_SIZE(size)); memcpy(b_entry_name, entry->e_name, entry->e_name_len); b_entry_name[entry->e_name_len] = '\0'; i.name = b_entry_name; error = ext4_get_inode_loc(inode, &is->iloc); if (error) goto cleanup; error = ext4_xattr_ibody_find(inode, &i, is); if (error) goto cleanup; /* Remove the chosen entry from the inode */ error = ext4_xattr_ibody_set(handle, inode, &i, is); if (error) goto cleanup; entry = IFIRST(header); if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize) shift_bytes = new_extra_isize; else shift_bytes = entry_size + 
size; /* Adjust the offsets and shift the remaining entries ahead */ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize - shift_bytes, (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes, (void *)header, total_ino - entry_size, inode->i_sb->s_blocksize); extra_isize += shift_bytes; new_extra_isize -= shift_bytes; EXT4_I(inode)->i_extra_isize = extra_isize; i.name = b_entry_name; i.value = buffer; i.value_len = size; error = ext4_xattr_block_find(inode, &i, bs); if (error) goto cleanup; /* Add entry which was removed from the inode into the block */ error = ext4_xattr_block_set(handle, inode, &i, bs); if (error) goto cleanup; kfree(b_entry_name); kfree(buffer); b_entry_name = NULL; buffer = NULL; brelse(is->iloc.bh); kfree(is); kfree(bs); } brelse(bh); up_write(&EXT4_I(inode)->xattr_sem); return 0; cleanup: kfree(b_entry_name); kfree(buffer); if (is) brelse(is->iloc.bh); kfree(is); kfree(bs); brelse(bh); up_write(&EXT4_I(inode)->xattr_sem); return error; } /* * ext4_xattr_delete_inode() * * Free extended attribute resources associated with this inode. This * is called immediately before an inode is freed. We have exclusive * access to the inode. */ void ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) { struct buffer_head *bh = NULL; if (!EXT4_I(inode)->i_file_acl) goto cleanup; bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); if (!bh) { EXT4_ERROR_INODE(inode, "block %llu read error", EXT4_I(inode)->i_file_acl); goto cleanup; } if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); goto cleanup; } ext4_xattr_release_block(handle, inode, bh); EXT4_I(inode)->i_file_acl = 0; cleanup: brelse(bh); } /* * ext4_xattr_cache_insert() * * Create a new entry in the extended attribute cache, and insert * it unless such an entry is already in the cache. * * Returns 0, or a negative error number on failure. 
 */
static void
ext4_xattr_cache_insert(struct mb2_cache *ext4_mb_cache, struct buffer_head *bh)
{
	/* Key the cache entry by the block's precomputed header hash. */
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	int error;

	error = mb2_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
				       bh->b_blocknr);
	if (error) {
		/* -EBUSY: an entry for this block already exists — benign. */
		if (error == -EBUSY)
			ea_bdebug(bh, "already in cache");
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
}

/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		/*
		 * Cheap field comparisons first (raw little-endian values
		 * compare fine for pure equality), then the name bytes.
		 */
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		/* e_value_block must be 0 in valid ext4 xattr blocks. */
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EFSCORRUPTED;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	/* Block 2 must not have trailing entries block 1 lacks. */
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb2_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb2_cache_entry *ce;
	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

	/* A zero hash marks the block as unshareable. */
	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
	ce = mb2_cache_entry_find_first(ext4_mb_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
					 (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT4_XATTR_REFCOUNT_MAX) {
			/* Refcount saturated; cannot take another reference. */
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
					  EXT4_XATTR_REFCOUNT_MAX);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			/*
			 * Match: hand back bh (caller brelse()s it) and the
			 * still-referenced cache entry via *pce.
			 */
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb2_cache_entry_find_next(ext4_mb_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	/* Fold the name bytes in, rotating by NAME_HASH_SHIFT per byte. */
	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	/* Then fold the (padded) value, one 32-bit word at a time. */
	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	/* Refresh the changed entry's own hash first. */
	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		/* Combine all per-entry hashes into the block hash. */
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define	HASH_BUCKET_BITS	10

/* Allocate the per-sb mbcache used to deduplicate xattr blocks. */
struct mb2_cache *
ext4_xattr_create_cache(void)
{
	return mb2_cache_create(HASH_BUCKET_BITS);
}

/* Tear down the cache; tolerates a NULL cache pointer. */
void ext4_xattr_destroy_cache(struct mb2_cache *cache)
{
	if (cache)
		mb2_cache_destroy(cache);
}
/*
 * Dataset artifact markers (not C source; preserved verbatim below).
 * Everything above is from fs/ext4/xattr.c; what follows is from
 * net/netfilter/nf_tables_api.c.
 *
 * ./CrossVul/dataset_final_sorted/CWE-19/c/good_1842_2
 * crossvul-cpp_data_bad_1487_0
 */
/* * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/module.h> #include <linux/init.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/net_namespace.h> #include <net/sock.h> static LIST_HEAD(nf_tables_expressions); /** * nft_register_afinfo - register nf_tables address family info * * @afi: address family info to register * * Register the address family for use with nf_tables. Returns zero on * success or a negative errno code otherwise. */ int nft_register_afinfo(struct net *net, struct nft_af_info *afi) { INIT_LIST_HEAD(&afi->tables); nfnl_lock(NFNL_SUBSYS_NFTABLES); list_add_tail_rcu(&afi->list, &net->nft.af_info); nfnl_unlock(NFNL_SUBSYS_NFTABLES); return 0; } EXPORT_SYMBOL_GPL(nft_register_afinfo); /** * nft_unregister_afinfo - unregister nf_tables address family info * * @afi: address family info to unregister * * Unregister the address family for use with nf_tables. 
 */
void nft_unregister_afinfo(struct nft_af_info *afi)
{
	/* Serialize against other nfnetlink users of the nftables subsys. */
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&afi->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_afinfo);

/*
 * Look up the af_info registered for @family in @net, or NULL.
 * NOTE(review): callers appear to rely on nfnl lock or RCU for list
 * stability — confirm against call sites outside this chunk.
 */
static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
{
	struct nft_af_info *afi;

	list_for_each_entry(afi, &net->nft.af_info, list) {
		if (afi->family == family)
			return afi;
	}
	return NULL;
}

/*
 * Like nft_afinfo_lookup() but, when @autoload is set, may drop the nfnl
 * lock to request the "nft-afinfo-<family>" module and retry.  If the
 * module load made the family appear, returns ERR_PTR(-EAGAIN) so the
 * caller replays the request under the re-taken lock; otherwise
 * ERR_PTR(-EAFNOSUPPORT).
 */
static struct nft_af_info *
nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
{
	struct nft_af_info *afi;

	afi = nft_afinfo_lookup(net, family);
	if (afi != NULL)
		return afi;
#ifdef CONFIG_MODULES
	if (autoload) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-afinfo-%u", family);
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		afi = nft_afinfo_lookup(net, family);
		if (afi != NULL)
			return ERR_PTR(-EAGAIN);
	}
#endif
	return ERR_PTR(-EAFNOSUPPORT);
}

/* Populate a nft_ctx from the incoming netlink request. */
static void nft_ctx_init(struct nft_ctx *ctx,
			 const struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct nft_af_info *afi,
			 struct nft_table *table,
			 struct nft_chain *chain,
			 const struct nlattr * const *nla)
{
	ctx->net = sock_net(skb->sk);
	ctx->afi = afi;
	ctx->table = table;
	ctx->chain = chain;
	ctx->nla = nla;
	/* Echo/notification bookkeeping for the requesting socket. */
	ctx->portid = NETLINK_CB(skb).portid;
	ctx->report = nlmsg_report(nlh);
	ctx->seq = nlh->nlmsg_seq;
}

/*
 * Allocate a transaction record of @size extra payload bytes; the ctx is
 * copied by value so the transaction outlives the request processing.
 */
static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
					 u32 size)
{
	struct nft_trans *trans;

	trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
	if (trans == NULL)
		return NULL;

	trans->msg_type = msg_type;
	trans->ctx = *ctx;

	return trans;
}

/* Unlink a transaction from the commit list and free it. */
static void nft_trans_destroy(struct nft_trans *trans)
{
	list_del(&trans->list);
	kfree(trans);
}

/*
 * Unregister the netfilter hooks of a base chain, unless the table is
 * dormant (its hooks were never active) or the chain is not a base chain.
 */
static void nf_tables_unregister_hooks(const struct nft_table *table,
				       const struct nft_chain *chain,
				       unsigned int hook_nops)
{
	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
	    chain->flags & NFT_BASE_CHAIN)
		nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops);
}

/* Internal table flags */
#define NFT_TABLE_INACTIVE	(1 << 15)

static int
nft_trans_table_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table)); if (trans == NULL) return -ENOMEM; if (msg_type == NFT_MSG_NEWTABLE) ctx->table->flags |= NFT_TABLE_INACTIVE; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; } static int nft_deltable(struct nft_ctx *ctx) { int err; err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE); if (err < 0) return err; list_del_rcu(&ctx->table->list); return err; } static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); if (trans == NULL) return -ENOMEM; if (msg_type == NFT_MSG_NEWCHAIN) ctx->chain->flags |= NFT_CHAIN_INACTIVE; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; } static int nft_delchain(struct nft_ctx *ctx) { int err; err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN); if (err < 0) return err; ctx->table->use--; list_del_rcu(&ctx->chain->list); return err; } static inline bool nft_rule_is_active(struct net *net, const struct nft_rule *rule) { return (rule->genmask & (1 << net->nft.gencursor)) == 0; } static inline int gencursor_next(struct net *net) { return net->nft.gencursor+1 == 1 ? 
1 : 0; } static inline int nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) { return (rule->genmask & (1 << gencursor_next(net))) == 0; } static inline void nft_rule_activate_next(struct net *net, struct nft_rule *rule) { /* Now inactive, will be active in the future */ rule->genmask = (1 << net->nft.gencursor); } static inline void nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) { rule->genmask = (1 << gencursor_next(net)); } static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) { rule->genmask = 0; } static int nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) { /* You cannot delete the same rule twice */ if (nft_rule_is_active_next(ctx->net, rule)) { nft_rule_deactivate_next(ctx->net, rule); ctx->chain->use--; return 0; } return -ENOENT; } static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, struct nft_rule *rule) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); if (trans == NULL) return NULL; nft_trans_rule(trans) = rule; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return trans; } static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_trans *trans; int err; trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule); if (trans == NULL) return -ENOMEM; err = nf_tables_delrule_deactivate(ctx, rule); if (err < 0) { nft_trans_destroy(trans); return err; } return 0; } static int nft_delrule_by_chain(struct nft_ctx *ctx) { struct nft_rule *rule; int err; list_for_each_entry(rule, &ctx->chain->rules, list) { err = nft_delrule(ctx, rule); if (err < 0) return err; } return 0; } /* Internal set flag */ #define NFT_SET_INACTIVE (1 << 15) static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, struct nft_set *set) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); if (trans == NULL) return -ENOMEM; if (msg_type == NFT_MSG_NEWSET && 
ctx->nla[NFTA_SET_ID] != NULL) { nft_trans_set_id(trans) = ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); set->flags |= NFT_SET_INACTIVE; } nft_trans_set(trans) = set; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; } static int nft_delset(struct nft_ctx *ctx, struct nft_set *set) { int err; err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set); if (err < 0) return err; list_del_rcu(&set->list); ctx->table->use--; return err; } /* * Tables */ static struct nft_table *nft_table_lookup(const struct nft_af_info *afi, const struct nlattr *nla) { struct nft_table *table; list_for_each_entry(table, &afi->tables, list) { if (!nla_strcmp(nla, table->name)) return table; } return NULL; } static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi, const struct nlattr *nla) { struct nft_table *table; if (nla == NULL) return ERR_PTR(-EINVAL); table = nft_table_lookup(afi, nla); if (table != NULL) return table; return ERR_PTR(-ENOENT); } static inline u64 nf_tables_alloc_handle(struct nft_table *table) { return ++table->hgenerator; } static const struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX]; static const struct nf_chain_type * __nf_tables_chain_type_lookup(int family, const struct nlattr *nla) { int i; for (i = 0; i < NFT_CHAIN_T_MAX; i++) { if (chain_type[family][i] != NULL && !nla_strcmp(nla, chain_type[family][i]->name)) return chain_type[family][i]; } return NULL; } static const struct nf_chain_type * nf_tables_chain_type_lookup(const struct nft_af_info *afi, const struct nlattr *nla, bool autoload) { const struct nf_chain_type *type; type = __nf_tables_chain_type_lookup(afi->family, nla); if (type != NULL) return type; #ifdef CONFIG_MODULES if (autoload) { nfnl_unlock(NFNL_SUBSYS_NFTABLES); request_module("nft-chain-%u-%.*s", afi->family, nla_len(nla), (const char *)nla_data(nla)); nfnl_lock(NFNL_SUBSYS_NFTABLES); type = __nf_tables_chain_type_lookup(afi->family, nla); if (type != NULL) return ERR_PTR(-EAGAIN); } #endif return 
ERR_PTR(-ENOENT); }	/* tail of a lookup helper whose head is above this view */

/* Netlink attribute policy for NFT_MSG_*TABLE messages. */
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
	[NFTA_TABLE_NAME]	= { .type = NLA_STRING },
	[NFTA_TABLE_FLAGS]	= { .type = NLA_U32 },
};

/*
 * Fill one table event (name, flags, use count) into a netlink message.
 * Returns the message length on success, -1 on nlattr exhaustion (the
 * partially built header is trimmed off again).
 */
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
				     u32 portid, u32 seq, int event, u32 flags,
				     int family, const struct nft_table *table)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	/* lower 16 bits of the generation counter; lets dumpers detect churn */
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/*
 * Multicast a table event to NFNLGRP_NFTABLES listeners (and the
 * requesting socket when ctx->report is set). Best effort: on failure the
 * error is recorded on the socket via nfnetlink_set_err().
 */
static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
					event, 0, ctx->afi->family, ctx->table);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, err);
	}
	return err;
}

/*
 * Netlink dump callback: walk all AF infos / tables under RCU, resuming
 * from cb->args[0], and emit one NFT_MSG_NEWTABLE per table.
 */
static int nf_tables_dump_tables(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			if (idx < s_idx)
				goto cont;
			if (idx > s_idx)
				/* new entry: clear stale resume state */
				memset(&cb->args[1], 0,
				       sizeof(cb->args) - sizeof(cb->args[0]));
			if (nf_tables_fill_table_info(skb, net,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      NFT_MSG_NEWTABLE,
						      NLM_F_MULTI,
						      afi->family, table) < 0)
				goto done;

			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
done:
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

/*
 * NFT_MSG_GETTABLE handler: either start a dump (NLM_F_DUMP) or unicast a
 * single table back to the requester.
 */
static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	struct sk_buff *skb2;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_tables,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	/* tables queued for addition in this transaction are not visible yet */
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		return -ENOMEM;

	err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid,
					nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
					family, table);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/*
 * Register the netfilter hooks of every base chain in a (waking) table.
 * On failure, unwind: unregister the hooks of the i chains registered so
 * far by re-walking the list and counting down.
 */
static int nf_tables_table_enable(const struct nft_af_info *afi,
				  struct nft_table *table)
{
	struct nft_chain *chain;
	int err, i = 0;

	list_for_each_entry(chain, &table->chains, list) {
		if (!(chain->flags & NFT_BASE_CHAIN))
			continue;

		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
		if (err < 0)
			goto err;

		i++;
	}
	return 0;
err:
	list_for_each_entry(chain, &table->chains, list) {
		if (!(chain->flags & NFT_BASE_CHAIN))
			continue;

		/* only unwind the chains whose hooks were registered above */
		if (i-- <= 0)
			break;

		nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
	}
	return err;
}

/* Unregister the hooks of every base chain (table going dormant). */
static void nf_tables_table_disable(const struct nft_af_info *afi,
				    struct nft_table *table)
{
	struct nft_chain *chain;

	list_for_each_entry(chain, &table->chains, list) {
		if (chain->flags & NFT_BASE_CHAIN)
			nf_unregister_hooks(nft_base_chain(chain)->ops,
					    afi->nops);
	}
}

/*
 * Handle NFTA_TABLE_FLAGS updates on an existing table (dormant <->
 * active). Enabling happens immediately; the transaction record only
 * carries what to undo/notify at commit/abort time.
 */
static int nf_tables_updtable(struct nft_ctx *ctx)
{
	struct nft_trans *trans;
	u32 flags;
	int ret = 0;

	if (!ctx->nla[NFTA_TABLE_FLAGS])
		return 0;

	flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
	if (flags & ~NFT_TABLE_F_DORMANT)
		return -EINVAL;

	if (flags == ctx->table->flags)
		return 0;

	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
				sizeof(struct nft_trans_table));
	if (trans == NULL)
		return -ENOMEM;

	if ((flags & NFT_TABLE_F_DORMANT) &&
	    !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
		/* active -> dormant: hooks are torn down at commit time */
		nft_trans_table_enable(trans) = false;
	} else if (!(flags & NFT_TABLE_F_DORMANT) &&
		   ctx->table->flags & NFT_TABLE_F_DORMANT) {
		/* dormant -> active: register hooks right away */
		ret = nf_tables_table_enable(ctx->afi, ctx->table);
		if (ret >= 0) {
			ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
			nft_trans_table_enable(trans) = true;
		}
	}
	if (ret < 0)
		goto err;

	nft_trans_table_update(trans) = true;
	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
err:
	nft_trans_destroy(trans);
	return ret;
}

/*
 * NFT_MSG_NEWTABLE handler: update an existing table's flags or allocate
 * and queue a new table for addition.
 */
static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nlattr *name;
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	u32 flags = 0;
	struct nft_ctx ctx;
	int err;

	afi = nf_tables_afinfo_lookup(net, family, true);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	name = nla[NFTA_TABLE_NAME];
	table = nf_tables_table_lookup(afi, name);
	if (IS_ERR(table)) {
		if (PTR_ERR(table) != -ENOENT)
			return PTR_ERR(table);
		table = NULL;
	}

	if (table != NULL) {
		if (table->flags & NFT_TABLE_INACTIVE)
return -ENOENT; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); return nf_tables_updtable(&ctx); } if (nla[NFTA_TABLE_FLAGS]) { flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS])); if (flags & ~NFT_TABLE_F_DORMANT) return -EINVAL; } if (!try_module_get(afi->owner)) return -EAFNOSUPPORT; table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL); if (table == NULL) { module_put(afi->owner); return -ENOMEM; } nla_strlcpy(table->name, name, nla_len(name)); INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); table->flags = flags; nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); if (err < 0) { kfree(table); module_put(afi->owner); return err; } list_add_tail_rcu(&table->list, &afi->tables); return 0; } static int nft_flush_table(struct nft_ctx *ctx) { int err; struct nft_chain *chain, *nc; struct nft_set *set, *ns; list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { ctx->chain = chain; err = nft_delrule_by_chain(ctx); if (err < 0) goto out; err = nft_delchain(ctx); if (err < 0) goto out; } list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { if (set->flags & NFT_SET_ANONYMOUS && !list_empty(&set->bindings)) continue; err = nft_delset(ctx, set); if (err < 0) goto out; } err = nft_deltable(ctx); out: return err; } static int nft_flush(struct nft_ctx *ctx, int family) { struct nft_af_info *afi; struct nft_table *table, *nt; const struct nlattr * const *nla = ctx->nla; int err = 0; list_for_each_entry(afi, &ctx->net->nft.af_info, list) { if (family != AF_UNSPEC && afi->family != family) continue; ctx->afi = afi; list_for_each_entry_safe(table, nt, &afi->tables, list) { if (nla[NFTA_TABLE_NAME] && nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) continue; ctx->table = table; err = nft_flush_table(ctx); if (err < 0) goto out; } } out: return err; } static int 
nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nla[])
{
	/* NFT_MSG_DELTABLE handler: flush one named table, or everything
	 * when no name/family is given. */
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	struct nft_ctx ctx;

	nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla);
	if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
		return nft_flush(&ctx, family);

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	ctx.afi = afi;
	ctx.table = table;

	return nft_flush_table(&ctx);
}

/* Final release of a table object once nothing uses it any more. */
static void nf_tables_table_destroy(struct nft_ctx *ctx)
{
	BUG_ON(ctx->table->use > 0);

	kfree(ctx->table);
	module_put(ctx->afi->owner);
}

/*
 * Register a chain type (filter/nat/route) in the global chain_type
 * matrix, keyed by family and type. -EBUSY if the slot is taken.
 */
int nft_register_chain_type(const struct nf_chain_type *ctype)
{
	int err = 0;

	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	if (chain_type[ctype->family][ctype->type] != NULL) {
		err = -EBUSY;
		goto out;
	}
	chain_type[ctype->family][ctype->type] = ctype;
out:
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return err;
}
EXPORT_SYMBOL_GPL(nft_register_chain_type);

/* Remove a chain type from the global registry. */
void nft_unregister_chain_type(const struct nf_chain_type *ctype)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	chain_type[ctype->family][ctype->type] = NULL;
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_chain_type);

/*
 * Chains
 */

/* Linear search of a table's chains by 64-bit handle. */
static struct nft_chain *
nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
{
	struct nft_chain *chain;

	list_for_each_entry(chain, &table->chains, list) {
		if (chain->handle == handle)
			return chain;
	}

	return ERR_PTR(-ENOENT);
}

/* Linear search of a table's chains by NFTA_CHAIN_NAME attribute. */
static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
						const struct nlattr *nla)
{
	struct nft_chain *chain;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	list_for_each_entry(chain, &table->chains, list) {
		if (!nla_strcmp(nla, chain->name))
			return chain;
	}

	return ERR_PTR(-ENOENT);
}

/* Netlink attribute policy for NFT_MSG_*CHAIN messages. */
static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING },
	[NFTA_CHAIN_HANDLE]	= { .type = NLA_U64 },
	[NFTA_CHAIN_NAME]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
	[NFTA_CHAIN_HOOK]	= { .type = NLA_NESTED },
	[NFTA_CHAIN_POLICY]	= { .type = NLA_U32 },
	[NFTA_CHAIN_TYPE]	= { .type = NLA_STRING },
	[NFTA_CHAIN_COUNTERS]	= { .type = NLA_NESTED },
};

/* Policy for the nested NFTA_CHAIN_HOOK attribute. */
static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
	[NFTA_HOOK_HOOKNUM]	= { .type = NLA_U32 },
	[NFTA_HOOK_PRIORITY]	= { .type = NLA_U32 },
};

/*
 * Sum the per-cpu packet/byte counters of a base chain (consistently,
 * via the u64_stats seqlock) and emit them as a nested
 * NFTA_CHAIN_COUNTERS attribute.
 */
static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
{
	struct nft_stats *cpu_stats, total;
	struct nlattr *nest;
	unsigned int seq;
	u64 pkts, bytes;
	int cpu;

	memset(&total, 0, sizeof(total));
	for_each_possible_cpu(cpu) {
		cpu_stats = per_cpu_ptr(stats, cpu);
		do {
			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			pkts = cpu_stats->pkts;
			bytes = cpu_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
		total.pkts += pkts;
		total.bytes += bytes;
	}
	nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -ENOSPC;
}

/*
 * Fill one chain event into a netlink message; base chains additionally
 * carry hook number/priority, policy, type name and counters.
 */
static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
				     u32 portid, u32 seq, int event, u32 flags,
				     int family, const struct nft_table *table,
				     const struct nft_chain *chain)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
		goto nla_put_failure;
	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
		goto nla_put_failure;

	if (chain->flags & NFT_BASE_CHAIN) {
		const struct nft_base_chain *basechain = nft_base_chain(chain);
		/* all ops share hooknum/priority; ops[0] is representative */
		const struct nf_hook_ops *ops = &basechain->ops[0];
		struct nlattr *nest;

		nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
		if (nest == NULL)
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
				 htonl(basechain->policy)))
			goto nla_put_failure;

		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
			goto nla_put_failure;

		if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
			goto nla_put_failure;
	}

	if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a chain event; same delivery semantics as table_notify. */
static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
					event, 0, ctx->afi->family, ctx->table,
					ctx->chain);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, err);
	}
	return err;
}

/* Netlink dump callback for chains; resume index kept in cb->args[0]. */
static int nf_tables_dump_chains(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			list_for_each_entry_rcu(chain, &table->chains, list) {
				if (idx < s_idx)
					goto cont;
				if (idx > s_idx)
					memset(&cb->args[1], 0,
					       sizeof(cb->args) - sizeof(cb->args[0]));
				if (nf_tables_fill_chain_info(skb, net,
							      NETLINK_CB(cb->skb).portid,
							      cb->nlh->nlmsg_seq,
							      NFT_MSG_NEWCHAIN,
							      NLM_F_MULTI,
							      afi->family, table, chain) < 0)
					goto done;

				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
				idx++;
			}
		}
	}
done:
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

/* NFT_MSG_GETCHAIN handler: dump all chains or unicast a single one. */
static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	struct sk_buff *skb2;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_chains,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);
	if (chain->flags & NFT_CHAIN_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		return -ENOMEM;

	err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid,
					nlh->nlmsg_seq, NFT_MSG_NEWCHAIN,
					0, family, table, chain);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/* Policy for the nested NFTA_CHAIN_COUNTERS attribute. */
static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
	[NFTA_COUNTER_PACKETS]	= { .type = NLA_U64 },
	[NFTA_COUNTER_BYTES]	= { .type = NLA_U64 },
};

/*
 * Allocate per-cpu chain statistics and seed the current cpu's slot with
 * the packet/byte counters supplied by userspace. Both counters are
 * mandatory. Returns the per-cpu allocation or an ERR_PTR.
 */
static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
{
	struct nlattr *tb[NFTA_COUNTER_MAX+1];
	struct nft_stats __percpu *newstats;
	struct nft_stats *stats;
	int err;

	err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
	if (err < 0)
		return ERR_PTR(err);

	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
		return ERR_PTR(-EINVAL);

	newstats = netdev_alloc_pcpu_stats(struct nft_stats);
	if (newstats == NULL)
		return ERR_PTR(-ENOMEM);

	/* Restore old counters on this cpu, no problem. Per-cpu statistics
	 * are not exposed to userspace.
	 */
	stats = this_cpu_ptr(newstats);
	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));

	return newstats;
}

/*
 * RCU-swap a base chain's statistics block; the old block is freed after
 * a grace period so concurrent packet-path readers stay safe.
 */
static void nft_chain_stats_replace(struct nft_base_chain *chain,
				    struct nft_stats __percpu *newstats)
{
	if (newstats == NULL)
		return;

	if (chain->stats) {
		struct nft_stats __percpu *oldstats =
				nft_dereference(chain->stats);

		rcu_assign_pointer(chain->stats, newstats);
		synchronize_rcu();
		free_percpu(oldstats);
	} else
		rcu_assign_pointer(chain->stats, newstats);
}

/* Final release of a chain; base chains also drop type module and stats. */
static void nf_tables_chain_destroy(struct nft_chain *chain)
{
	BUG_ON(chain->use > 0);

	if (chain->flags & NFT_BASE_CHAIN) {
		module_put(nft_base_chain(chain)->type->owner);
		free_percpu(nft_base_chain(chain)->stats);
		kfree(nft_base_chain(chain));
	} else {
		kfree(chain);
	}
}

/*
 * NFT_MSG_NEWCHAIN handler. Three cases:
 *  - existing chain: queue an update transaction (rename, policy, stats);
 *  - NFTA_CHAIN_HOOK present: create a base chain and register its
 *    netfilter hooks (unless the table is dormant);
 *  - otherwise: create a plain (jump target) chain.
 */
static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nlattr * uninitialized_var(name);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct nft_chain *chain;
	struct nft_base_chain *basechain = NULL;
	struct nlattr *ha[NFTA_HOOK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	u8 policy = NF_ACCEPT;
	u64 handle = 0;
	unsigned int i;
	struct nft_stats __percpu *stats;
	int err;
	bool create;
	struct nft_ctx ctx;

	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;

	afi = nf_tables_afinfo_lookup(net, family, true);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);

	chain = NULL;
	name = nla[NFTA_CHAIN_NAME];

	/* handle takes precedence over name for identifying the chain */
	if (nla[NFTA_CHAIN_HANDLE]) {
		handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
		chain = nf_tables_chain_lookup_byhandle(table, handle);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	} else {
		chain = nf_tables_chain_lookup(table, name);
		if (IS_ERR(chain)) {
			if (PTR_ERR(chain) != -ENOENT)
				return PTR_ERR(chain);
			chain = NULL;
		}
	}

	if (nla[NFTA_CHAIN_POLICY]) {
		/* a policy only makes sense on (new or existing) base chains */
		if ((chain != NULL &&
		    !(chain->flags & NFT_BASE_CHAIN)) ||
		    nla[NFTA_CHAIN_HOOK] == NULL)
			return -EOPNOTSUPP;

		policy = ntohl(nla_get_be32(nla[NFTA_CHAIN_POLICY]));
		switch (policy) {
		case NF_DROP:
		case NF_ACCEPT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (chain != NULL) {
		/* update path: queue a NEWCHAIN transaction */
		struct nft_stats *stats = NULL;
		struct nft_trans *trans;

		if (chain->flags & NFT_CHAIN_INACTIVE)
			return -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		/* rename (handle + name given): the new name must be free */
		if (nla[NFTA_CHAIN_HANDLE] && name &&
		    !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
			return -EEXIST;

		if (nla[NFTA_CHAIN_COUNTERS]) {
			if (!(chain->flags & NFT_BASE_CHAIN))
				return -EOPNOTSUPP;

			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
			if (IS_ERR(stats))
				return PTR_ERR(stats);
		}

		/* NOTE(review): if nft_trans_alloc() fails below, 'stats'
		 * looks leaked — confirm against upstream history. */
		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
					sizeof(struct nft_trans_chain));
		if (trans == NULL)
			return -ENOMEM;

		nft_trans_chain_stats(trans) = stats;
		nft_trans_chain_update(trans) = true;

		if (nla[NFTA_CHAIN_POLICY])
			nft_trans_chain_policy(trans) = policy;
		else
			nft_trans_chain_policy(trans) = -1;

		if (nla[NFTA_CHAIN_HANDLE] && name) {
			nla_strlcpy(nft_trans_chain_name(trans), name,
				    NFT_CHAIN_MAXNAMELEN);
		}
		list_add_tail(&trans->list, &net->nft.commit_list);
		return 0;
	}

	if (table->use == UINT_MAX)
		return -EOVERFLOW;

	if (nla[NFTA_CHAIN_HOOK]) {
		/* base chain: validate hook, pick chain type, set up ops */
		const struct nf_chain_type *type;
		struct nf_hook_ops *ops;
		nf_hookfn *hookfn;
		u32 hooknum, priority;

		type = chain_type[family][NFT_CHAIN_T_DEFAULT];
		if (nla[NFTA_CHAIN_TYPE]) {
			type = nf_tables_chain_type_lookup(afi,
							   nla[NFTA_CHAIN_TYPE],
							   create);
			if (IS_ERR(type))
				return PTR_ERR(type);
		}

		err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
				       nft_hook_policy);
		if (err < 0)
			return err;
		if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
		    ha[NFTA_HOOK_PRIORITY] == NULL)
			return -EINVAL;

		hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
		if (hooknum >= afi->nhooks)
			return -EINVAL;
		priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));

		if (!(type->hook_mask & (1 << hooknum)))
			return -EOPNOTSUPP;
		if (!try_module_get(type->owner))
			return -ENOENT;
		hookfn = type->hooks[hooknum];

		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
		if (basechain == NULL)
			return -ENOMEM;

		if (nla[NFTA_CHAIN_COUNTERS]) {
			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
			if (IS_ERR(stats)) {
				module_put(type->owner);
				kfree(basechain);
				return PTR_ERR(stats);
			}
			basechain->stats = stats;
		} else {
			stats = netdev_alloc_pcpu_stats(struct nft_stats);
			if (stats == NULL) {
				module_put(type->owner);
				kfree(basechain);
				return -ENOMEM;
			}
			rcu_assign_pointer(basechain->stats, stats);
		}

		basechain->type = type;
		chain = &basechain->chain;

		/* one nf_hook_ops per per-family hook instance */
		for (i = 0; i < afi->nops; i++) {
			ops = &basechain->ops[i];
			ops->pf		= family;
			ops->owner	= afi->owner;
			ops->hooknum	= hooknum;
			ops->priority	= priority;
			ops->priv	= chain;
			ops->hook	= afi->hooks[ops->hooknum];
			if (hookfn)
				ops->hook = hookfn;
			if (afi->hook_ops_init)
				afi->hook_ops_init(ops, i);
		}

		chain->flags |= NFT_BASE_CHAIN;
		basechain->policy = policy;
	} else {
		/* plain chain (only reachable via jumps) */
		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
		if (chain == NULL)
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&chain->rules);
	chain->handle = nf_tables_alloc_handle(table);
	chain->net = net;
	chain->table = table;
	nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);

	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
	    chain->flags & NFT_BASE_CHAIN) {
		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
		if (err < 0)
			goto err1;
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
	err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
	if (err < 0)
		goto err2;

	table->use++;
	list_add_tail_rcu(&chain->list, &table->chains);
	return 0;
err2:
	nf_tables_unregister_hooks(table, chain, afi->nops);
err1:
	nf_tables_chain_destroy(chain);
	return err;
}

/*
 * NFT_MSG_DELCHAIN handler: queue deletion of an unused, active chain.
 * Chains still referenced by rules/jumps (use > 0) cannot be removed.
 */
static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct nft_chain *chain;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	struct nft_ctx ctx;

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);
	if (chain->flags & NFT_CHAIN_INACTIVE)
		return -ENOENT;
	if (chain->use > 0)
		return -EBUSY;

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	return nft_delchain(&ctx);
}

/*
 * Expressions
 */

/**
 *	nft_register_expr - register nf_tables expr type
 *	@type: expr type
 *
 *	Registers the expr type for use with nf_tables. Returns zero on
 *	success or a negative errno code otherwise.
 */
int nft_register_expr(struct nft_expr_type *type)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	/* family-specific types are listed first so they win the lookup
	 * over generic (NFPROTO_UNSPEC) types of the same name */
	if (type->family == NFPROTO_UNSPEC)
		list_add_tail_rcu(&type->list, &nf_tables_expressions);
	else
		list_add_rcu(&type->list, &nf_tables_expressions);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return 0;
}
EXPORT_SYMBOL_GPL(nft_register_expr);

/**
 *	nft_unregister_expr - unregister nf_tables expr type
 *	@type: expr type
 *
 * 	Unregisters the expr type for use with nf_tables.
 */
void nft_unregister_expr(struct nft_expr_type *type)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&type->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);

/* Find an expression type by name; generic types match any family. */
static const struct nft_expr_type *__nft_expr_type_get(u8 family,
						       struct nlattr *nla)
{
	const struct nft_expr_type *type;

	list_for_each_entry(type, &nf_tables_expressions, list) {
		if (!nla_strcmp(nla, type->name) &&
		    (!type->family || type->family == family))
			return type;
	}
	return NULL;
}

/*
 * Like __nft_expr_type_get() but takes a module reference, and on a miss
 * tries to autoload "nft-expr-<family>-<name>" then "nft-expr-<name>".
 * Returns -EAGAIN after a successful autoload so the caller restarts the
 * whole operation under a fresh nfnl lock.
 */
static const struct nft_expr_type *nft_expr_type_get(u8 family,
						     struct nlattr *nla)
{
	const struct nft_expr_type *type;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	type = __nft_expr_type_get(family, nla);
	if (type != NULL && try_module_get(type->owner))
		return type;

#ifdef CONFIG_MODULES
	if (type == NULL) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-expr-%u-%.*s", family,
			       nla_len(nla), (char *)nla_data(nla));
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		if (__nft_expr_type_get(family, nla))
			return ERR_PTR(-EAGAIN);

		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-expr-%.*s",
			       nla_len(nla), (char *)nla_data(nla));
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		if (__nft_expr_type_get(family, nla))
			return ERR_PTR(-EAGAIN);
	}
#endif
	return ERR_PTR(-ENOENT);
}

/* Policy for one NFTA_LIST_ELEM expression attribute. */
static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
	[NFTA_EXPR_NAME]	= { .type = NLA_STRING },
	[NFTA_EXPR_DATA]	= { .type = NLA_NESTED },
};

/* Serialize one expression (name + type-specific dump) into a message. */
static int nf_tables_fill_expr_info(struct sk_buff *skb,
				    const struct nft_expr *expr)
{
	if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
		goto nla_put_failure;

	if (expr->ops->dump) {
		struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
		if (data == NULL)
			goto nla_put_failure;
		if (expr->ops->dump(skb, expr) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, data);
	}

	return skb->len;

nla_put_failure:
	return -1;
};	/* NOTE(review): stray ';' after function body — harmless, but
	 * worth removing upstream */

/* Parsed-but-not-yet-instantiated expression: ops + its attributes. */
struct nft_expr_info {
	const struct nft_expr_ops	*ops;
	struct nlattr			*tb[NFT_EXPR_MAXATTR + 1];
};

/*
 * Parse one expression attribute: resolve the type (taking a module ref
 * that the caller must drop via info->ops->type->owner on error paths),
 * validate its nested data and select the concrete ops.
 */
static int nf_tables_expr_parse(const struct nft_ctx *ctx,
				const struct nlattr *nla,
				struct nft_expr_info *info)
{
	const struct nft_expr_type *type;
	const struct nft_expr_ops *ops;
	struct nlattr *tb[NFTA_EXPR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
	if (err < 0)
		return err;

	type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
	if (IS_ERR(type))
		return PTR_ERR(type);

	if (tb[NFTA_EXPR_DATA]) {
		err = nla_parse_nested(info->tb, type->maxattr,
				       tb[NFTA_EXPR_DATA], type->policy);
		if (err < 0)
			goto err1;
	} else
		memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));

	if (type->select_ops != NULL) {
		ops = type->select_ops(ctx,
				       (const struct nlattr * const *)info->tb);
		if (IS_ERR(ops)) {
			err = PTR_ERR(ops);
			goto err1;
		}
	} else
		ops = type->ops;

	info->ops = ops;
	return 0;

err1:
	module_put(type->owner);
	return err;
}

/*
 * Instantiate a parsed expression in place inside a rule. On init
 * failure expr->ops is reset so rule teardown skips this slot.
 */
static int nf_tables_newexpr(const struct nft_ctx *ctx,
			     const struct nft_expr_info *info,
			     struct nft_expr *expr)
{
	const struct nft_expr_ops *ops = info->ops;
	int err;

	expr->ops = ops;
	if (ops->init) {
		err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
		if (err < 0)
			goto err1;
	}

	return 0;

err1:
	expr->ops = NULL;
	return err;
}

/* Tear down an expression and drop the type module reference. */
static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
				   struct nft_expr *expr)
{
	if (expr->ops->destroy)
		expr->ops->destroy(ctx, expr);
	module_put(expr->ops->type->owner);
}

/*
 * Rules
 */

/* Linear search of a chain's rules by handle. */
static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
						u64 handle)
{
	struct nft_rule *rule;

	// FIXME: this sucks
	list_for_each_entry(rule, &chain->rules, list) {
		if (handle == rule->handle)
			return rule;
	}

	return ERR_PTR(-ENOENT);
}

/* Rule lookup keyed by the NFTA_RULE_HANDLE attribute. */
static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
					      const struct nlattr *nla)
{
	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
}

/* Netlink attribute policy for NFT_MSG_*RULE messages. */
static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
	[NFTA_RULE_TABLE]	= { .type = NLA_STRING },
	[NFTA_RULE_CHAIN]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
	[NFTA_RULE_HANDLE]	= { .type = NLA_U64 },
	[NFTA_RULE_EXPRESSIONS]	= { .type = NLA_NESTED },
	[NFTA_RULE_COMPAT]	= { .type = NLA_NESTED },
	[NFTA_RULE_POSITION]	= { .type = NLA_U64 },
	[NFTA_RULE_USERDATA]	= { .type = NLA_BINARY,
				    .len = NFT_USERDATA_MAXLEN },
};

/*
 * Fill one rule event into a netlink message: table/chain/handle, the
 * predecessor's handle as NFTA_RULE_POSITION (except for deletions), the
 * expression list, and optional userdata.
 */
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
				    u32 portid, u32 seq, int event,
				    u32 flags, int family,
				    const struct nft_table *table,
				    const struct nft_chain *chain,
				    const struct nft_rule *rule)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	const struct nft_expr *expr, *next;
	struct nlattr *list;
	const struct nft_rule *prule;
	int type = event | NFNL_SUBSYS_NFTABLES << 8;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
		goto nla_put_failure;
	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
		goto nla_put_failure;

	if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
		prule = list_entry(rule->list.prev, struct nft_rule, list);
		if (nla_put_be64(skb, NFTA_RULE_POSITION,
				 cpu_to_be64(prule->handle)))
			goto nla_put_failure;
	}

	list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
	if (list == NULL)
		goto nla_put_failure;
	nft_rule_for_each_expr(expr, next, rule) {
		struct
nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
		if (elem == NULL)
			goto nla_put_failure;
		if (nf_tables_fill_expr_info(skb, expr) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, elem);
	}
	nla_nest_end(skb, list);

	if (rule->ulen &&
	    nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a rule event; same delivery semantics as table_notify. */
static int nf_tables_rule_notify(const struct nft_ctx *ctx,
				 const struct nft_rule *rule,
				 int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
				       event, 0, ctx->afi->family, ctx->table,
				       ctx->chain, rule);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, err);
	}
	return err;
}

/*
 * Netlink dump callback for rules; only rules active in the current
 * generation are reported. Resume index in cb->args[0].
 */
static int nf_tables_dump_rules(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	const struct nft_rule *rule;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			list_for_each_entry_rcu(chain, &table->chains, list) {
				list_for_each_entry_rcu(rule, &chain->rules, list) {
					if (!nft_rule_is_active(net, rule))
						goto cont;
					if (idx < s_idx)
						goto cont;
					if (idx > s_idx)
						memset(&cb->args[1], 0,
						       sizeof(cb->args) - sizeof(cb->args[0]));
					if (nf_tables_fill_rule_info(skb, net,
								     NETLINK_CB(cb->skb).portid,
								     cb->nlh->nlmsg_seq,
								     NFT_MSG_NEWRULE,
								     NLM_F_MULTI | NLM_F_APPEND,
								     afi->family, table, chain, rule) < 0)
						goto done;

					nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
					idx++;
				}
			}
		}
	}
done:
	rcu_read_unlock();

	cb->args[0] = idx;
	return skb->len;
}

/* NFT_MSG_GETRULE handler: dump all rules or unicast a single one. */
static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
			     const struct nlmsghdr *nlh,
			     const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	const struct nft_rule *rule;
	struct sk_buff *skb2;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_rules,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);
	if (chain->flags & NFT_CHAIN_INACTIVE)
		return -ENOENT;

	rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		return -ENOMEM;

	err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
				       nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
				       family, table, chain, rule);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/* Destroy every initialized expression in a rule, then the rule itself. */
static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
				   struct nft_rule *rule)
{
	struct nft_expr *expr;

	/*
	 * Careful: some expressions might not be initialized in case this
	 * is called on error from nf_tables_newrule().
	 */
	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		nf_tables_expr_destroy(ctx, expr);
		expr = nft_expr_next(expr);
	}
	kfree(rule);
}

#define NFT_RULE_MAXEXPRS	128

/* Scratch array for parsed expressions, shared across nf_tables_newrule
 * calls; serialized by the nfnl subsystem mutex (no concurrent writers). */
static struct nft_expr_info *info;

/*
 * NFT_MSG_NEWRULE handler: parse up to NFT_RULE_MAXEXPRS expressions,
 * allocate the rule with inline expression and userdata storage, link it
 * at the position requested by NLM_F_REPLACE/NLM_F_APPEND/position
 * attribute, and queue the transaction(s).
 */
static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
			     const struct nlmsghdr *nlh,
			     const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_chain *chain;
	struct nft_rule *rule, *old_rule = NULL;
	struct nft_trans *trans = NULL;
	struct nft_expr *expr;
	struct nft_ctx ctx;
	struct nlattr *tmp;
	unsigned int size, i, n, ulen = 0;
	int err, rem;
	bool create;
	u64 handle, pos_handle;

	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;

	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);

	chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);

	if (nla[NFTA_RULE_HANDLE]) {
		/* a handle means replace (only with NLM_F_REPLACE) */
		handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
		rule = __nf_tables_rule_lookup(chain, handle);
		if (IS_ERR(rule))
			return PTR_ERR(rule);

		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			old_rule = rule;
		else
			return -EOPNOTSUPP;
	} else {
		if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EINVAL;
		handle = nf_tables_alloc_handle(table);

		if (chain->use == UINT_MAX)
			return -EOVERFLOW;
	}

	if (nla[NFTA_RULE_POSITION]) {
		/* insert relative to an existing rule */
		if (!(nlh->nlmsg_flags & NLM_F_CREATE))
			return -EOPNOTSUPP;

		pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
		old_rule = __nf_tables_rule_lookup(chain, pos_handle);
		if (IS_ERR(old_rule))
			return PTR_ERR(old_rule);
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	n = 0;
	size = 0;
	if (nla[NFTA_RULE_EXPRESSIONS]) {
		nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS],
rem) {
			err = -EINVAL;
			if (nla_type(tmp) != NFTA_LIST_ELEM)
				goto err1;
			if (n == NFT_RULE_MAXEXPRS)
				goto err1;
			err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
			if (err < 0)
				goto err1;
			size += info[n].ops->size;
			n++;
		}
	}

	if (nla[NFTA_RULE_USERDATA])
		ulen = nla_len(nla[NFTA_RULE_USERDATA]);

	err = -ENOMEM;
	/* expressions and userdata live inline after the rule header */
	rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
	if (rule == NULL)
		goto err1;

	nft_rule_activate_next(net, rule);

	rule->handle = handle;
	rule->dlen   = size;
	rule->ulen   = ulen;

	if (ulen)
		nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);

	expr = nft_expr_first(rule);
	for (i = 0; i < n; i++) {
		err = nf_tables_newexpr(&ctx, &info[i], expr);
		if (err < 0)
			goto err2;
		/* ownership moved into the rule; err1 must not double-put */
		info[i].ops = NULL;
		expr = nft_expr_next(expr);
	}

	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
		if (nft_rule_is_active_next(net, old_rule)) {
			/* old rule stays visible until commit, then goes */
			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
						   old_rule);
			if (trans == NULL) {
				err = -ENOMEM;
				goto err2;
			}
			nft_rule_deactivate_next(net, old_rule);
			chain->use--;
			list_add_tail_rcu(&rule->list, &old_rule->list);
		} else {
			err = -ENOENT;
			goto err2;
		}
	} else if (nlh->nlmsg_flags & NLM_F_APPEND)
		if (old_rule)
			list_add_rcu(&rule->list, &old_rule->list);
		else
			list_add_tail_rcu(&rule->list, &chain->rules);
	else {
		if (old_rule)
			list_add_tail_rcu(&rule->list, &old_rule->list);
		else
			list_add_rcu(&rule->list, &chain->rules);
	}

	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
		err = -ENOMEM;
		goto err3;
	}
	chain->use++;
	return 0;

err3:
	list_del_rcu(&rule->list);
	if (trans) {
		/* undo the queued replace of old_rule */
		list_del_rcu(&nft_trans_rule(trans)->list);
		nft_rule_clear(net, nft_trans_rule(trans));
		nft_trans_destroy(trans);
		chain->use++;
	}
err2:
	nf_tables_rule_destroy(&ctx, rule);
err1:
	for (i = 0; i < n; i++) {
		if (info[i].ops != NULL)
			module_put(info[i].ops->type->owner);
	}
	return err;
}

/*
 * NFT_MSG_DELRULE handler: delete one rule by handle, flush one chain,
 * or (no chain given) flush the rules of every chain in the table.
 */
static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
			     const struct nlmsghdr *nlh,
			     const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_chain *chain = NULL;
	struct nft_rule *rule;
	int family = nfmsg->nfgen_family, err = 0;
	struct nft_ctx ctx;

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	if (nla[NFTA_RULE_CHAIN]) {
		chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	if (chain) {
		if (nla[NFTA_RULE_HANDLE]) {
			rule = nf_tables_rule_lookup(chain,
						     nla[NFTA_RULE_HANDLE]);
			if (IS_ERR(rule))
				return PTR_ERR(rule);

			err = nft_delrule(&ctx, rule);
		} else {
			err = nft_delrule_by_chain(&ctx);
		}
	} else {
		list_for_each_entry(chain, &table->chains, list) {
			ctx.chain = chain;
			err = nft_delrule_by_chain(&ctx);
			if (err < 0)
				break;
		}
	}

	return err;
}

/*
 * Sets
 */

static LIST_HEAD(nf_tables_set_ops);

/* Register a set backend implementation (hash, rbtree, ...). */
int nft_register_set(struct nft_set_ops *ops)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return 0;
}
EXPORT_SYMBOL_GPL(nft_register_set);

/* Unregister a set backend implementation. */
void nft_unregister_set(struct nft_set_ops *ops)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&ops->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_set);

/*
 * Select a set implementation based on the data characteristics and the
 * given policy. The total memory use might not be known if no size is
 * given, in that case the amount of memory per element is used.
 */
static const struct nft_set_ops *
nft_select_set_ops(const struct nlattr * const nla[],
		   const struct nft_set_desc *desc,
		   enum nft_set_policies policy)
{
	const struct nft_set_ops *ops, *bops;
	struct nft_set_estimate est, best;
	u32 features;

#ifdef CONFIG_MODULES
	if (list_empty(&nf_tables_set_ops)) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-set");
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		/* autoload succeeded: restart so the caller re-evaluates
		 * under a consistent lock */
		if (!list_empty(&nf_tables_set_ops))
			return ERR_PTR(-EAGAIN);
	}
#endif
	features = 0;
	if (nla[NFTA_SET_FLAGS] != NULL) {
		features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
		features &= NFT_SET_INTERVAL | NFT_SET_MAP;
	}

	bops	   = NULL;
	best.size  = ~0;
	best.class = ~0;

	list_for_each_entry(ops, &nf_tables_set_ops, list) {
		if ((ops->features & features) != features)
			continue;
		if (!ops->estimate(desc, features, &est))
			continue;

		/* 'break' keeps this candidate, 'continue' rejects it */
		switch (policy) {
		case NFT_SET_POL_PERFORMANCE:
			if (est.class < best.class)
				break;
			if (est.class == best.class && est.size < best.size)
				break;
			continue;
		case NFT_SET_POL_MEMORY:
			if (est.size < best.size)
				break;
			if (est.size == best.size && est.class < best.class)
				break;
			continue;
		default:
			break;
		}

		if (!try_module_get(ops->owner))
			continue;
		if (bops != NULL)
			module_put(bops->owner);

		bops = ops;
		best = est;
	}

	if (bops != NULL)
		return bops;

	return ERR_PTR(-EOPNOTSUPP);
}

/* Netlink attribute policy for NFT_MSG_*SET messages. */
static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
	[NFTA_SET_TABLE]	= { .type = NLA_STRING },
	[NFTA_SET_NAME]		= { .type = NLA_STRING,
				    .len = IFNAMSIZ - 1 },
	[NFTA_SET_FLAGS]	= { .type = NLA_U32 },
	[NFTA_SET_KEY_TYPE]	= { .type = NLA_U32 },
	[NFTA_SET_KEY_LEN]	= { .type = NLA_U32 },
	[NFTA_SET_DATA_TYPE]	= { .type = NLA_U32 },
	[NFTA_SET_DATA_LEN]	= { .type = NLA_U32 },
	[NFTA_SET_POLICY]	= { .type = NLA_U32 },
	[NFTA_SET_DESC]		= { .type = NLA_NESTED },
	[NFTA_SET_ID]		= { .type = NLA_U32 },
};

/* Policy for the nested NFTA_SET_DESC attribute. */
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
	[NFTA_SET_DESC_SIZE]	= { .type = NLA_U32 },
};

static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, const struct
sk_buff *skb,
				     const struct nlmsghdr *nlh,
				     const struct nlattr * const nla[])
{
	struct net *net = sock_net(skb->sk);
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi = NULL;
	struct nft_table *table = NULL;

	if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
		afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
		if (IS_ERR(afi))
			return PTR_ERR(afi);
	}

	if (nla[NFTA_SET_TABLE] != NULL) {
		/* a table attribute requires a concrete family */
		if (afi == NULL)
			return -EAFNOSUPPORT;

		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
		if (IS_ERR(table))
			return PTR_ERR(table);
		if (table->flags & NFT_TABLE_INACTIVE)
			return -ENOENT;
	}

	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
	return 0;
}

/* Look up a set in a table by its name attribute. */
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
				     const struct nlattr *nla)
{
	struct nft_set *set;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	list_for_each_entry(set, &table->sets, list) {
		if (!nla_strcmp(nla, set->name))
			return set;
	}
	return ERR_PTR(-ENOENT);
}

/* Look up a set created earlier in the same batch by its NFTA_SET_ID. */
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
					  const struct nlattr *nla)
{
	struct nft_trans *trans;
	u32 id = ntohl(nla_get_be32(nla));

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->msg_type == NFT_MSG_NEWSET &&
		    id == nft_trans_set_id(trans))
			return nft_trans_set(trans);
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Expand a "name%d" template to the lowest unused number, using a one-page
 * bitmap of numbers already taken by this table's existing sets, then
 * verify the final name is unique.
 */
static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
				    const char *name)
{
	const struct nft_set *i;
	const char *p;
	unsigned long *inuse;
	unsigned int n = 0, min = 0;

	p = strnchr(name, IFNAMSIZ, '%');
	if (p != NULL) {
		/* only a single %d conversion is permitted in the template */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (inuse == NULL)
			return -ENOMEM;
cont:
		list_for_each_entry(i, &ctx->table->sets, list) {
			int tmp;

			if (!sscanf(i->name, name, &tmp))
				continue;
			/* only track numbers inside the current window */
			if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
				continue;

			set_bit(tmp - min, inuse);
		}

		n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
		if (n >= BITS_PER_BYTE * PAGE_SIZE) {
			/* window exhausted: slide it up and rescan */
			min += BITS_PER_BYTE * PAGE_SIZE;
			memset(inuse, 0, PAGE_SIZE);
			goto cont;
		}
		free_page((unsigned long)inuse);
	}

	snprintf(set->name, sizeof(set->name), name, min + n);
	list_for_each_entry(i, &ctx->table->sets, list) {
		if (!strcmp(set->name, i->name))
			return -ENFILE;
	}
	return 0;
}

/* Emit one set description as a netlink message into skb. */
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
			      const struct nft_set *set, u16 event, u16 flags)
{
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *desc;
	u32 portid = ctx->portid;
	u32 seq = ctx->seq;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = ctx->afi->family;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
		goto nla_put_failure;
	if (set->flags != 0)
		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
			goto nla_put_failure;

	if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
		goto nla_put_failure;
	if (set->flags & NFT_SET_MAP) {
		if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
			goto nla_put_failure;
	}

	/* NFT_SET_POL_PERFORMANCE is the default and is not dumped */
	if (set->policy != NFT_SET_POL_PERFORMANCE) {
		if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
			goto nla_put_failure;
	}

	desc = nla_nest_start(skb, NFTA_SET_DESC);
	if (desc == NULL)
		goto nla_put_failure;
	if (set->size &&
	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
		goto nla_put_failure;
	nla_nest_end(skb, desc);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

static int nf_tables_set_notify(const struct nft_ctx *ctx,
				const struct nft_set *set,
				int event, gfp_t gfp_flags)
{
	struct sk_buff *skb;
	u32 portid = ctx->portid;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_set(skb, ctx, set, event, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
			     ctx->report, gfp_flags);
err:
	if (err < 0)
		nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
	return err;
}

/* Netlink dump callback: walk every set visible through the dump context,
 * resuming from the af/table/index saved in cb->args on a previous pass.
 */
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nft_set *set;
	unsigned int idx, s_idx = cb->args[0];
	struct nft_af_info *afi;
	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
	struct net *net = sock_net(skb->sk);
	int cur_family = cb->args[3];
	struct nft_ctx *ctx = cb->data, ctx_set;

	if (cb->args[1])
		return skb->len;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (ctx->afi && ctx->afi != afi)
			continue;

		if (cur_family) {
			if (afi->family != cur_family)
				continue;

			cur_family = 0;
		}
		list_for_each_entry_rcu(table, &afi->tables, list) {
			if (ctx->table && ctx->table != table)
				continue;

			if (cur_table) {
				if (cur_table != table)
					continue;

				cur_table = NULL;
			}
			idx = 0;
			list_for_each_entry_rcu(set, &table->sets, list) {
				if (idx < s_idx)
					goto cont;
				ctx_set = *ctx;
				ctx_set.table = table;
				ctx_set.afi = afi;
				if (nf_tables_fill_set(skb, &ctx_set, set,
						       NFT_MSG_NEWSET,
						       NLM_F_MULTI) < 0) {
					/* skb full: remember where to resume */
					cb->args[0] = idx;
					cb->args[2] = (unsigned long) table;
					cb->args[3] = afi->family;
					goto done;
				}
				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
				idx++;
			}
			if (s_idx)
				s_idx = 0;
		}
	}
	cb->args[1] = 1;
done:
	rcu_read_unlock();
	return skb->len;
}

/* Free the context copied for the dump in nf_tables_getset(). */
static int nf_tables_dump_sets_done(struct netlink_callback *cb)
{
	kfree(cb->data);
	return 0;
}

static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nft_set *set;
	struct nft_ctx ctx;
	struct sk_buff *skb2;
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	int err;

	/* Verify existence before starting dump */
	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
	if (err < 0)
		return err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_sets,
			.done = nf_tables_dump_sets_done,
		};
		struct nft_ctx *ctx_dump;

		/* the dump outlives this call: give it its own ctx copy */
		ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_KERNEL);
		if (ctx_dump == NULL)
			return -ENOMEM;

		*ctx_dump = ctx;
		c.data = ctx_dump;

		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	/* Only accept unspec with dump */
	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
		return -EAFNOSUPPORT;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb2 == NULL)
		return -ENOMEM;

	err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/* Parse the optional nested NFTA_SET_DESC attribute into *desc. */
static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
				    struct nft_set_desc *desc,
				    const struct nlattr *nla)
{
	struct nlattr *da[NFTA_SET_DESC_MAX + 1];
	int err;

	err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla,
			       nft_set_desc_policy);
	if (err < 0)
		return err;

	if (da[NFTA_SET_DESC_SIZE] != NULL)
		desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));

	return 0;
}

/* Create a new set: validate attributes, pick a backend, allocate and
 * initialize the set and queue a NEWSET transaction.
 */
static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_set_ops *ops;
	struct nft_af_info *afi;
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_set *set;
	struct nft_ctx ctx;
	char name[IFNAMSIZ];
	unsigned int size;
	bool create;
	u32 ktype, dtype, flags, policy;
	struct nft_set_desc desc;
	int err;

	if (nla[NFTA_SET_TABLE] == NULL ||
	    nla[NFTA_SET_NAME] == NULL ||
	    nla[NFTA_SET_KEY_LEN] == NULL ||
	    nla[NFTA_SET_ID] == NULL)
		return
-EINVAL;

	memset(&desc, 0, sizeof(desc));

	ktype = NFT_DATA_VALUE;
	if (nla[NFTA_SET_KEY_TYPE] != NULL) {
		ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
		/* reserved type space is not accepted from userspace */
		if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
			return -EINVAL;
	}

	desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
	if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
		return -EINVAL;

	flags = 0;
	if (nla[NFTA_SET_FLAGS] != NULL) {
		flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
		if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
			      NFT_SET_INTERVAL | NFT_SET_MAP))
			return -EINVAL;
	}

	dtype = 0;
	if (nla[NFTA_SET_DATA_TYPE] != NULL) {
		/* data type only makes sense for map sets */
		if (!(flags & NFT_SET_MAP))
			return -EINVAL;

		dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
		if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
		    dtype != NFT_DATA_VERDICT)
			return -EINVAL;

		if (dtype != NFT_DATA_VERDICT) {
			if (nla[NFTA_SET_DATA_LEN] == NULL)
				return -EINVAL;
			desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
			if (desc.dlen == 0 ||
			    desc.dlen > FIELD_SIZEOF(struct nft_data, data))
				return -EINVAL;
		} else
			desc.dlen = sizeof(struct nft_data);
	} else if (flags & NFT_SET_MAP)
		return -EINVAL;

	policy = NFT_SET_POL_PERFORMANCE;
	if (nla[NFTA_SET_POLICY] != NULL)
		policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));

	if (nla[NFTA_SET_DESC] != NULL) {
		/* NOTE(review): ctx is not initialized until nft_ctx_init()
		 * below; desc parsing does not appear to dereference it —
		 * confirm before relying on ctx inside the parser. */
		err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
		if (err < 0)
			return err;
	}

	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;

	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);

	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);

	set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set)) {
		if (PTR_ERR(set) != -ENOENT)
			return PTR_ERR(set);
		set = NULL;
	}

	if (set != NULL) {
		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;
		return 0;
	}

	if (!(nlh->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;

	ops = nft_select_set_ops(nla, &desc, policy);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	size = 0;
	if (ops->privsize != NULL)
		size = ops->privsize(nla);

	err = -ENOMEM;
	set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
	if (set == NULL)
		goto err1;

	/* NOTE(review): local buffer is name[IFNAMSIZ] but the copy is
	 * sized by sizeof(set->name) — assumes both are IFNAMSIZ; confirm. */
	nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
	err = nf_tables_set_alloc_name(&ctx, set, name);
	if (err < 0)
		goto err2;

	INIT_LIST_HEAD(&set->bindings);
	set->ops = ops;
	set->ktype = ktype;
	set->klen = desc.klen;
	set->dtype = dtype;
	set->dlen = desc.dlen;
	set->flags = flags;
	set->size = desc.size;
	set->policy = policy;

	err = ops->init(set, &desc, nla);
	if (err < 0)
		goto err2;

	/* NOTE(review): if nft_trans_set_add() fails after a successful
	 * ops->init(), err2 frees the set without calling ops->destroy();
	 * verify the backend allocates no private state in init. */
	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
	if (err < 0)
		goto err2;

	list_add_tail_rcu(&set->list, &table->sets);
	table->use++;
	return 0;

err2:
	kfree(set);
err1:
	module_put(ops->owner);
	return err;
}

/* Release a set: backend teardown, module ref, then the set itself. */
static void nft_set_destroy(struct nft_set *set)
{
	set->ops->destroy(set);
	module_put(set->ops->owner);
	kfree(set);
}

/* Unlink a set, notify listeners and destroy it. */
static void nf_tables_set_destroy(const struct nft_ctx *ctx,
				  struct nft_set *set)
{
	list_del_rcu(&set->list);
	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
	nft_set_destroy(set);
}

static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_set *set;
	struct nft_ctx ctx;
	int err;

	if
(nfmsg->nfgen_family == NFPROTO_UNSPEC)
		return -EAFNOSUPPORT;
	if (nla[NFTA_SET_TABLE] == NULL)
		return -EINVAL;

	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;
	/* a set still referenced by expressions cannot be removed */
	if (!list_empty(&set->bindings))
		return -EBUSY;

	return nft_delset(&ctx, set);
}

/* Walk callback used when binding a map: validate that each element's
 * data can be loaded into the destination register type. */
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
					const struct nft_set *set,
					const struct nft_set_iter *iter,
					const struct nft_set_elem *elem)
{
	enum nft_registers dreg;

	dreg = nft_type_to_reg(set->dtype);
	return nft_validate_data_load(ctx, dreg, &elem->data,
				      set->dtype == NFT_DATA_VERDICT ?
				      NFT_DATA_VERDICT : NFT_DATA_VALUE);
}

/* Bind a set to a chain via an expression reference. */
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
		       struct nft_set_binding *binding)
{
	struct nft_set_binding *i;
	struct nft_set_iter iter;

	/* anonymous sets accept a single binding only */
	if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
		return -EBUSY;

	if (set->flags & NFT_SET_MAP) {
		/* If the set is already bound to the same chain all
		 * jumps are already validated for that chain.
		 */
		list_for_each_entry(i, &set->bindings, list) {
			if (i->chain == binding->chain)
				goto bind;
		}

		iter.skip = 0;
		iter.count = 0;
		iter.err = 0;
		iter.fn = nf_tables_bind_check_setelem;

		set->ops->walk(ctx, set, &iter);
		if (iter.err < 0) {
			/* Destroy anonymous sets if binding fails */
			if (set->flags & NFT_SET_ANONYMOUS)
				nf_tables_set_destroy(ctx, set);

			return iter.err;
		}
	}
bind:
	binding->chain = ctx->chain;
	list_add_tail_rcu(&binding->list, &set->bindings);
	return 0;
}

/* Drop a binding; the last binding of an active anonymous set kills it. */
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
			  struct nft_set_binding *binding)
{
	list_del_rcu(&binding->list);

	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
	    !(set->flags & NFT_SET_INACTIVE))
		nf_tables_set_destroy(ctx, set);
}

/*
 * Set elements
 */

static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
	[NFTA_SET_ELEM_KEY]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_DATA]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy
nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
	[NFTA_SET_ELEM_LIST_TABLE]	= { .type = NLA_STRING },
	[NFTA_SET_ELEM_LIST_SET]	= { .type = NLA_STRING },
	[NFTA_SET_ELEM_LIST_ELEMENTS]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_LIST_SET_ID]	= { .type = NLA_U32 },
};

/* Resolve family/table for element requests; "trans" allows addressing
 * tables created earlier in the same (uncommitted) batch. */
static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
				      const struct sk_buff *skb,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nla[],
				      bool trans)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);

	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (!trans && (table->flags & NFT_TABLE_INACTIVE))
		return -ENOENT;

	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
	return 0;
}

static int nf_tables_fill_setelem(struct sk_buff *skb, const struct
nft_set *set,
				  const struct nft_set_elem *elem)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NFTA_LIST_ELEM);
	if (nest == NULL)
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
			  set->klen) < 0)
		goto nla_put_failure;

	/* interval-end elements of a map carry no data */
	if (set->flags & NFT_SET_MAP &&
	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
			  set->dtype == NFT_DATA_VERDICT ?
			  NFT_DATA_VERDICT : NFT_DATA_VALUE,
			  set->dlen) < 0)
		goto nla_put_failure;

	if (elem->flags != 0)
		if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
				 htonl(elem->flags)))
			goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

/* Carries the dump state through the set backend's walk callback. */
struct nft_set_dump_args {
	const struct netlink_callback	*cb;
	struct nft_set_iter		iter;
	struct sk_buff			*skb;
};

static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
				  const struct nft_set *set,
				  const struct nft_set_iter *iter,
				  const struct nft_set_elem *elem)
{
	struct nft_set_dump_args *args;

	args = container_of(iter, struct nft_set_dump_args, iter);
	return nf_tables_fill_setelem(args->skb, set, elem);
}

/* Netlink dump callback for the elements of one set. */
static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nft_set *set;
	struct nft_set_dump_args args;
	struct nft_ctx ctx;
	struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	u32 portid, seq;
	int event, err;

	/* re-parse the original request: cb carries no parsed attributes */
	err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
			  NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy);
	if (err < 0)
		return err;

	err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
					 false);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	event = NFT_MSG_NEWSETELEM;
	event |= NFNL_SUBSYS_NFTABLES << 8;
	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			NLM_F_MULTI);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = ctx.afi->family;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
	if (nest == NULL)
		goto nla_put_failure;

	args.cb = cb;
	args.skb = skb;
	args.iter.skip = cb->args[0];
	args.iter.count = 0;
	args.iter.err = 0;
	args.iter.fn = nf_tables_dump_setelem;
	set->ops->walk(&ctx, set, &args.iter);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);

	if (args.iter.err && args.iter.err != -EMSGSIZE)
		return args.iter.err;
	/* no new elements emitted: the dump is complete */
	if (args.iter.count == cb->args[0])
		return 0;

	cb->args[0] = args.iter.count;
	return skb->len;

nla_put_failure:
	return -ENOSPC;
}

static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				const struct nlattr * const nla[])
{
	const struct nft_set *set;
	struct nft_ctx ctx;
	int err;

	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	/* only dump requests are supported for set elements */
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_set,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}

/* Build a single-element NEW/DELSETELEM notification message. */
static int nf_tables_fill_setelem_info(struct sk_buff *skb,
				       const struct nft_ctx *ctx, u32 seq,
				       u32 portid, int event, u16 flags,
				       const struct nft_set *set,
				       const struct nft_set_elem *elem)
{
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	int err;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg =
nlmsg_data(nlh); nfmsg->nfgen_family = ctx->afi->family; nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff); if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) goto nla_put_failure; if (nla_put_string(skb, NFTA_SET_NAME, set->name)) goto nla_put_failure; nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS); if (nest == NULL) goto nla_put_failure; err = nf_tables_fill_setelem(skb, set, elem); if (err < 0) goto nla_put_failure; nla_nest_end(skb, nest); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_trim(skb, nlh); return -1; } static int nf_tables_setelem_notify(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_elem *elem, int event, u16 flags) { struct net *net = ctx->net; u32 portid = ctx->portid; struct sk_buff *skb; int err; if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return 0; err = -ENOBUFS; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb == NULL) goto err; err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags, set, elem); if (err < 0) { kfree_skb(skb); goto err; } err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, GFP_KERNEL); err: if (err < 0) nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err); return err; } static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx, int msg_type, struct nft_set *set) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem)); if (trans == NULL) return NULL; nft_trans_elem_set(trans) = set; return trans; } static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; struct nft_data_desc d1, d2; struct nft_set_elem elem; struct nft_set_binding *binding; enum nft_registers dreg; struct nft_trans *trans; int err; if (set->size && set->nelems == set->size) return -ENFILE; err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, nft_set_elem_policy); if (err < 0) return 
err; if (nla[NFTA_SET_ELEM_KEY] == NULL) return -EINVAL; elem.flags = 0; if (nla[NFTA_SET_ELEM_FLAGS] != NULL) { elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS])); if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END) return -EINVAL; } if (set->flags & NFT_SET_MAP) { if (nla[NFTA_SET_ELEM_DATA] == NULL && !(elem.flags & NFT_SET_ELEM_INTERVAL_END)) return -EINVAL; if (nla[NFTA_SET_ELEM_DATA] != NULL && elem.flags & NFT_SET_ELEM_INTERVAL_END) return -EINVAL; } else { if (nla[NFTA_SET_ELEM_DATA] != NULL) return -EINVAL; } err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]); if (err < 0) goto err1; err = -EINVAL; if (d1.type != NFT_DATA_VALUE || d1.len != set->klen) goto err2; err = -EEXIST; if (set->ops->get(set, &elem) == 0) goto err2; if (nla[NFTA_SET_ELEM_DATA] != NULL) { err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]); if (err < 0) goto err2; err = -EINVAL; if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen) goto err3; dreg = nft_type_to_reg(set->dtype); list_for_each_entry(binding, &set->bindings, list) { struct nft_ctx bind_ctx = { .afi = ctx->afi, .table = ctx->table, .chain = (struct nft_chain *)binding->chain, }; err = nft_validate_data_load(&bind_ctx, dreg, &elem.data, d2.type); if (err < 0) goto err3; } } trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set); if (trans == NULL) goto err3; err = set->ops->insert(set, &elem); if (err < 0) goto err4; nft_trans_elem(trans) = elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; err4: kfree(trans); err3: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_uninit(&elem.data, d2.type); err2: nft_data_uninit(&elem.key, d1.type); err1: return err; } static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { struct net *net = sock_net(skb->sk); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; int rem, err = 0; if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return 
-EINVAL;

	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set)) {
		/* fall back to a set created earlier in this batch */
		if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
			set = nf_tables_set_lookup_byid(net,
					nla[NFTA_SET_ELEM_LIST_SET_ID]);
		}
		if (IS_ERR(set))
			return PTR_ERR(set);
	}

	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
		return -EBUSY;

	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
		err = nft_add_set_elem(&ctx, set, attr);
		if (err < 0)
			break;

		set->nelems++;
	}
	return err;
}

/* Queue deletion of a single element as a DELSETELEM transaction. */
static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
			   const struct nlattr *attr)
{
	struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
	struct nft_data_desc desc;
	struct nft_set_elem elem;
	struct nft_trans *trans;
	int err;

	err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
			       nft_set_elem_policy);
	if (err < 0)
		goto err1;

	err = -EINVAL;
	if (nla[NFTA_SET_ELEM_KEY] == NULL)
		goto err1;

	err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
	if (err < 0)
		goto err1;

	err = -EINVAL;
	if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
		goto err2;

	err = set->ops->get(set, &elem);
	if (err < 0)
		goto err2;

	trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
	if (trans == NULL) {
		err = -ENOMEM;
		goto err2;
	}

	nft_trans_elem(trans) = elem;
	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
err2:
	nft_data_uninit(&elem.key, desc.type);
err1:
	return err;
}

static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				const struct nlattr * const nla[])
{
	const struct nlattr *attr;
	struct nft_set *set;
	struct nft_ctx ctx;
	int rem, err = 0;

	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
		return -EINVAL;

	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	/* bound constant sets cannot be modified */
	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
		return -EBUSY;

	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
		err = nft_del_setelem(&ctx, set, attr);
		if (err < 0)
			break;

		set->nelems--;
	}
	return err;
}

/* Build a NEWGEN message carrying the current generation id. */
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
				   u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWGEN;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = AF_UNSPEC;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(net->nft.base_seq & 0xffff);

	if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -EMSGSIZE;
}

/* Broadcast the new generation id after a successful commit. */
static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
				int event)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);
	struct sk_buff *skb2;
	int err;

	if (nlmsg_report(nlh) &&
	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb2 == NULL)
		goto err;

	err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
				      nlh->nlmsg_seq);
	if (err < 0) {
		kfree_skb(skb2);
		goto err;
	}

	err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid,
			     NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(net, NETLINK_CB(skb).portid,
				  NFNLGRP_NFTABLES, err);
	}
	return err;
}

/* GETGEN request: unicast the current generation id to the requester. */
static int nf_tables_getgen(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *skb2;
	int err;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb2 == NULL)
		return -ENOMEM;

	err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
				      nlh->nlmsg_seq);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
err:
	kfree_skb(skb2);
	return err;
}

static const struct nfnl_callback
nf_tables_cb[NFT_MSG_MAX] = {
	/* message -> handler dispatch table; .call_batch entries run inside
	 * a transaction batch, .call entries are executed directly. */
	[NFT_MSG_NEWTABLE] = {
		.call_batch	= nf_tables_newtable,
		.attr_count	= NFTA_TABLE_MAX,
		.policy		= nft_table_policy,
	},
	[NFT_MSG_GETTABLE] = {
		.call		= nf_tables_gettable,
		.attr_count	= NFTA_TABLE_MAX,
		.policy		= nft_table_policy,
	},
	[NFT_MSG_DELTABLE] = {
		.call_batch	= nf_tables_deltable,
		.attr_count	= NFTA_TABLE_MAX,
		.policy		= nft_table_policy,
	},
	[NFT_MSG_NEWCHAIN] = {
		.call_batch	= nf_tables_newchain,
		.attr_count	= NFTA_CHAIN_MAX,
		.policy		= nft_chain_policy,
	},
	[NFT_MSG_GETCHAIN] = {
		.call		= nf_tables_getchain,
		.attr_count	= NFTA_CHAIN_MAX,
		.policy		= nft_chain_policy,
	},
	[NFT_MSG_DELCHAIN] = {
		.call_batch	= nf_tables_delchain,
		.attr_count	= NFTA_CHAIN_MAX,
		.policy		= nft_chain_policy,
	},
	[NFT_MSG_NEWRULE] = {
		.call_batch	= nf_tables_newrule,
		.attr_count	= NFTA_RULE_MAX,
		.policy		= nft_rule_policy,
	},
	[NFT_MSG_GETRULE] = {
		.call		= nf_tables_getrule,
		.attr_count	= NFTA_RULE_MAX,
		.policy		= nft_rule_policy,
	},
	[NFT_MSG_DELRULE] = {
		.call_batch	= nf_tables_delrule,
		.attr_count	= NFTA_RULE_MAX,
		.policy		= nft_rule_policy,
	},
	[NFT_MSG_NEWSET] = {
		.call_batch	= nf_tables_newset,
		.attr_count	= NFTA_SET_MAX,
		.policy		= nft_set_policy,
	},
	[NFT_MSG_GETSET] = {
		.call		= nf_tables_getset,
		.attr_count	= NFTA_SET_MAX,
		.policy		= nft_set_policy,
	},
	[NFT_MSG_DELSET] = {
		.call_batch	= nf_tables_delset,
		.attr_count	= NFTA_SET_MAX,
		.policy		= nft_set_policy,
	},
	[NFT_MSG_NEWSETELEM] = {
		.call_batch	= nf_tables_newsetelem,
		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
		.policy		= nft_set_elem_list_policy,
	},
	[NFT_MSG_GETSETELEM] = {
		.call		= nf_tables_getsetelem,
		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
		.policy		= nft_set_elem_list_policy,
	},
	[NFT_MSG_DELSETELEM] = {
		.call_batch	= nf_tables_delsetelem,
		.attr_count	= NFTA_SET_ELEM_LIST_MAX,
		.policy		= nft_set_elem_list_policy,
	},
	[NFT_MSG_GETGEN] = {
		.call		= nf_tables_getgen,
	},
};

/* Apply a queued chain update (rename, stats, base-chain policy). */
static void nft_chain_commit_update(struct nft_trans *trans)
{
	struct nft_base_chain *basechain;

	if (nft_trans_chain_name(trans)[0])
		strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));

	if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
		return;

	basechain = nft_base_chain(trans->ctx.chain);
	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));

	switch (nft_trans_chain_policy(trans)) {
	case NF_DROP:
	case NF_ACCEPT:
		basechain->policy = nft_trans_chain_policy(trans);
		break;
	}
}

/* Final teardown of objects whose deletion was committed. */
static void nf_tables_commit_release(struct nft_trans *trans)
{
	switch (trans->msg_type) {
	case NFT_MSG_DELTABLE:
		nf_tables_table_destroy(&trans->ctx);
		break;
	case NFT_MSG_DELCHAIN:
		nf_tables_chain_destroy(trans->ctx.chain);
		break;
	case NFT_MSG_DELRULE:
		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
		break;
	case NFT_MSG_DELSET:
		nft_set_destroy(nft_trans_set(trans));
		break;
	}
	kfree(trans);
}

/* Commit the pending transaction list: flip the generation and make every
 * queued change visible, sending the corresponding notifications. */
static int nf_tables_commit(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct nft_trans *trans, *next;
	struct nft_trans_elem *te;

	/* Bump generation counter, invalidate any dump in progress */
	while (++net->nft.base_seq == 0);

	/* A new generation has just started */
	net->nft.gencursor = gencursor_next(net);

	/* Make sure all packets have left the previous generation before
	 * purging old rules.
	 */
	synchronize_rcu();

	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
		switch (trans->msg_type) {
		case NFT_MSG_NEWTABLE:
			if (nft_trans_table_update(trans)) {
				if (!nft_trans_table_enable(trans)) {
					nf_tables_table_disable(trans->ctx.afi,
								trans->ctx.table);
					trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
				}
			} else {
				trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
			}
			nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
			nft_trans_destroy(trans);
			break;
		case NFT_MSG_DELTABLE:
			nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
			break;
		case NFT_MSG_NEWCHAIN:
			if (nft_trans_chain_update(trans))
				nft_chain_commit_update(trans);
			else
				trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;

			nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
			nft_trans_destroy(trans);
			break;
		case NFT_MSG_DELCHAIN:
			nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
			nf_tables_unregister_hooks(trans->ctx.table,
						   trans->ctx.chain,
						   trans->ctx.afi->nops);
			break;
		case NFT_MSG_NEWRULE:
			nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
			nf_tables_rule_notify(&trans->ctx,
					      nft_trans_rule(trans),
					      NFT_MSG_NEWRULE);
			nft_trans_destroy(trans);
			break;
		case NFT_MSG_DELRULE:
			list_del_rcu(&nft_trans_rule(trans)->list);
			nf_tables_rule_notify(&trans->ctx,
					      nft_trans_rule(trans),
					      NFT_MSG_DELRULE);
			break;
		case NFT_MSG_NEWSET:
			nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
			/* This avoids hitting -EBUSY when deleting the table
			 * from the transaction.
*/ if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS && !list_empty(&nft_trans_set(trans)->bindings)) trans->ctx.table->use--; nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); nft_trans_destroy(trans); break; case NFT_MSG_DELSET: nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_DELSET, GFP_KERNEL); break; case NFT_MSG_NEWSETELEM: nf_tables_setelem_notify(&trans->ctx, nft_trans_elem_set(trans), &nft_trans_elem(trans), NFT_MSG_NEWSETELEM, 0); nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: te = (struct nft_trans_elem *)trans->data; nf_tables_setelem_notify(&trans->ctx, te->set, &te->elem, NFT_MSG_DELSETELEM, 0); te->set->ops->get(te->set, &te->elem); te->set->ops->remove(te->set, &te->elem); nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); if (te->elem.flags & NFT_SET_MAP) { nft_data_uninit(&te->elem.data, te->set->dtype); } nft_trans_destroy(trans); break; } } synchronize_rcu(); list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); nf_tables_commit_release(trans); } nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); return 0; } static void nf_tables_abort_release(struct nft_trans *trans) { switch (trans->msg_type) { case NFT_MSG_NEWTABLE: nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_NEWCHAIN: nf_tables_chain_destroy(trans->ctx.chain); break; case NFT_MSG_NEWRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_NEWSET: nft_set_destroy(nft_trans_set(trans)); break; } kfree(trans); } static int nf_tables_abort(struct sk_buff *skb) { struct net *net = sock_net(skb->sk); struct nft_trans *trans, *next; struct nft_set *set; list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (nft_trans_table_enable(trans)) { nf_tables_table_disable(trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } 
nft_trans_destroy(trans); } else { list_del_rcu(&trans->ctx.table->list); } break; case NFT_MSG_DELTABLE: list_add_tail_rcu(&trans->ctx.table->list, &trans->ctx.afi->tables); nft_trans_destroy(trans); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { free_percpu(nft_trans_chain_stats(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); nf_tables_unregister_hooks(trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: trans->ctx.table->use++; list_add_tail_rcu(&trans->ctx.chain->list, &trans->ctx.table->chains); nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: trans->ctx.chain->use--; list_del_rcu(&nft_trans_rule(trans)->list); break; case NFT_MSG_DELRULE: trans->ctx.chain->use++; nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: trans->ctx.table->use--; list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: trans->ctx.table->use++; list_add_tail_rcu(&nft_trans_set(trans)->list, &trans->ctx.table->sets); nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: nft_trans_elem_set(trans)->nelems--; set = nft_trans_elem_set(trans); set->ops->get(set, &nft_trans_elem(trans)); set->ops->remove(set, &nft_trans_elem(trans)); nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: nft_trans_elem_set(trans)->nelems++; nft_trans_destroy(trans); break; } } synchronize_rcu(); list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); nf_tables_abort_release(trans); } return 0; } static const struct nfnetlink_subsystem nf_tables_subsys = { .name = "nf_tables", .subsys_id = NFNL_SUBSYS_NFTABLES, .cb_count = NFT_MSG_MAX, .cb = nf_tables_cb, .commit = nf_tables_commit, .abort = nf_tables_abort, }; int nft_chain_validate_dependency(const struct nft_chain *chain, enum nft_chain_type type) { const struct nft_base_chain *basechain; if (chain->flags & 
NFT_BASE_CHAIN) { basechain = nft_base_chain(chain); if (basechain->type->type != type) return -EOPNOTSUPP; } return 0; } EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all * reachable chains have been traversed. * * The loop check is performed whenever a new jump verdict is added to an * expression or verdict map or a verdict map is bound to a new chain. */ static int nf_tables_check_loops(const struct nft_ctx *ctx, const struct nft_chain *chain); static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_iter *iter, const struct nft_set_elem *elem) { if (elem->flags & NFT_SET_ELEM_INTERVAL_END) return 0; switch (elem->data.verdict) { case NFT_JUMP: case NFT_GOTO: return nf_tables_check_loops(ctx, elem->data.chain); default: return 0; } } static int nf_tables_check_loops(const struct nft_ctx *ctx, const struct nft_chain *chain) { const struct nft_rule *rule; const struct nft_expr *expr, *last; const struct nft_set *set; struct nft_set_binding *binding; struct nft_set_iter iter; if (ctx->chain == chain) return -ELOOP; list_for_each_entry(rule, &chain->rules, list) { nft_rule_for_each_expr(expr, last, rule) { const struct nft_data *data = NULL; int err; if (!expr->ops->validate) continue; err = expr->ops->validate(ctx, expr, &data); if (err < 0) return err; if (data == NULL) continue; switch (data->verdict) { case NFT_JUMP: case NFT_GOTO: err = nf_tables_check_loops(ctx, data->chain); if (err < 0) return err; default: break; } } } list_for_each_entry(set, &ctx->table->sets, list) { if (!(set->flags & NFT_SET_MAP) || set->dtype != NFT_DATA_VERDICT) continue; list_for_each_entry(binding, &set->bindings, list) { if (binding->chain != chain) continue; iter.skip = 0; iter.count = 0; iter.err = 0; iter.fn = nf_tables_loop_check_setelem; set->ops->walk(ctx, set, 
							     &iter);
			if (iter.err < 0)
				return iter.err;
		}
	}

	return 0;
}

/**
 * nft_validate_input_register - validate an expressions' input register
 *
 * @reg: the register number
 *
 * Validate that the input register is one of the general purpose
 * registers.
 */
int nft_validate_input_register(enum nft_registers reg)
{
	/* NFT_REG_VERDICT (and anything below it) may not be read as input */
	if (reg <= NFT_REG_VERDICT)
		return -EINVAL;
	if (reg > NFT_REG_MAX)
		return -ERANGE;
	return 0;
}
EXPORT_SYMBOL_GPL(nft_validate_input_register);

/**
 * nft_validate_output_register - validate an expressions' output register
 *
 * @reg: the register number
 *
 * Validate that the output register is one of the general purpose
 * registers or the verdict register.
 */
int nft_validate_output_register(enum nft_registers reg)
{
	/* unlike input, writing to NFT_REG_VERDICT itself is permitted
	 * (note the strict '<' here vs. '<=' above) */
	if (reg < NFT_REG_VERDICT)
		return -EINVAL;
	if (reg > NFT_REG_MAX)
		return -ERANGE;
	return 0;
}
EXPORT_SYMBOL_GPL(nft_validate_output_register);

/**
 * nft_validate_data_load - validate an expressions' data load
 *
 * @ctx: context of the expression performing the load
 * @reg: the destination register number
 * @data: the data to load
 * @type: the data type
 *
 * Validate that a data load uses the appropriate data type for
 * the destination register. A value of NULL for the data means
 * that its runtime gathered data, which is always of type
 * NFT_DATA_VALUE.
 */
int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
			   const struct nft_data *data,
			   enum nft_data_types type)
{
	int err;

	switch (reg) {
	case NFT_REG_VERDICT:
		/* the verdict register only ever holds verdict data */
		if (data == NULL || type != NFT_DATA_VERDICT)
			return -EINVAL;

		if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
			/* reject jumps that would create a ruleset loop */
			err = nf_tables_check_loops(ctx, data->chain);
			if (err < 0)
				return err;

			/* propagate jump depth to the target chain and bound
			 * it by NFT_JUMP_STACK_SIZE */
			if (ctx->chain->level + 1 > data->chain->level) {
				if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
					return -EMLINK;
				data->chain->level = ctx->chain->level + 1;
			}
		}

		return 0;
	default:
		/* general purpose registers only take value data */
		if (data != NULL && type != NFT_DATA_VALUE)
			return -EINVAL;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(nft_validate_data_load);

static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
	[NFTA_VERDICT_CODE]	= { .type = NLA_U32 },
	[NFTA_VERDICT_CHAIN]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
};

/* Parse a nested NFTA_DATA_VERDICT attribute into @data. Jump/goto
 * verdicts take a reference on the target chain (chain->use++), which
 * nft_verdict_uninit() releases. */
static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
			    struct nft_data_desc *desc, const struct nlattr *nla)
{
	struct nlattr *tb[NFTA_VERDICT_MAX + 1];
	struct nft_chain *chain;
	int err;

	err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
	if (err < 0)
		return err;

	if (!tb[NFTA_VERDICT_CODE])
		return -EINVAL;
	data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));

	switch (data->verdict) {
	default:
		/* anything not listed below must be a plain netfilter
		 * verdict code */
		switch (data->verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
		case NF_DROP:
		case NF_QUEUE:
			break;
		default:
			return -EINVAL;
		}
		/* fall through */
	case NFT_CONTINUE:
	case NFT_BREAK:
	case NFT_RETURN:
		desc->len = sizeof(data->verdict);
		break;
	case NFT_JUMP:
	case NFT_GOTO:
		if (!tb[NFTA_VERDICT_CHAIN])
			return -EINVAL;
		chain = nf_tables_chain_lookup(ctx->table,
					       tb[NFTA_VERDICT_CHAIN]);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
		/* jumping into a base chain is not allowed */
		if (chain->flags & NFT_BASE_CHAIN)
			return -EOPNOTSUPP;

		chain->use++;
		data->chain = chain;
		/* NOTE(review): sizeof(data) is the size of the *pointer*,
		 * not of the verdict/chain payload; the other cases use
		 * sizeof(data->verdict). Confirm this is the intended
		 * register length for jump/goto verdicts. */
		desc->len = sizeof(data);
		break;
	}

	desc->type = NFT_DATA_VERDICT;
	return 0;
}

/* Release the chain reference taken by nft_verdict_init() for jump/goto. */
static void nft_verdict_uninit(const struct nft_data *data)
{
	switch (data->verdict) {
	case NFT_JUMP:
	case NFT_GOTO:
		data->chain->use--;
		break;
	}
}

/* Dump a verdict as a nested NFTA_DATA_VERDICT attribute.
 * Returns 0 on success, -1 if the skb ran out of room. */
static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NFTA_DATA_VERDICT);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
		goto nla_put_failure;

	switch (data->verdict) {
	case NFT_JUMP:
	case NFT_GOTO:
		/* jump/goto verdicts also carry the target chain's name */
		if (nla_put_string(skb, NFTA_VERDICT_CHAIN,
				   data->chain->name))
			goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -1;
}

/* Copy an NFTA_DATA_VALUE attribute into @data. The value may be shorter
 * than a register (desc->len records the actual length) but never longer;
 * nla_memcpy() zero-pads the remainder of the register. */
static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
			  struct nft_data_desc *desc, const struct nlattr *nla)
{
	unsigned int len;

	len = nla_len(nla);
	if (len == 0)
		return -EINVAL;
	if (len > sizeof(data->data))
		return -EOVERFLOW;

	nla_memcpy(data->data, nla, sizeof(data->data));
	desc->type = NFT_DATA_VALUE;
	desc->len = len;
	return 0;
}

static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
			  unsigned int len)
{
	return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
}

static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
	[NFTA_DATA_VALUE]	= { .type = NLA_BINARY,
				    .len = FIELD_SIZEOF(struct nft_data, data) },
	[NFTA_DATA_VERDICT]	= { .type = NLA_NESTED },
};

/**
 * nft_data_init - parse nf_tables data netlink attributes
 *
 * @ctx: context of the expression using the data
 * @data: destination struct nft_data
 * @desc: data description
 * @nla: netlink attribute containing data
 *
 * Parse the netlink data attributes and initialize a struct nft_data.
 * The type and length of data are returned in the data description.
 *
 * The caller can indicate that it only wants to accept data of type
 * NFT_DATA_VALUE by passing NULL for the ctx argument.
*/ int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { struct nlattr *tb[NFTA_DATA_MAX + 1]; int err; err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy); if (err < 0) return err; if (tb[NFTA_DATA_VALUE]) return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]); if (tb[NFTA_DATA_VERDICT] && ctx != NULL) return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]); return -EINVAL; } EXPORT_SYMBOL_GPL(nft_data_init); /** * nft_data_uninit - release a nft_data item * * @data: struct nft_data to release * @type: type of data * * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, * all others need to be released by calling this function. */ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) { switch (type) { case NFT_DATA_VALUE: return; case NFT_DATA_VERDICT: return nft_verdict_uninit(data); default: WARN_ON(1); } } EXPORT_SYMBOL_GPL(nft_data_uninit); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, enum nft_data_types type, unsigned int len) { struct nlattr *nest; int err; nest = nla_nest_start(skb, attr); if (nest == NULL) return -1; switch (type) { case NFT_DATA_VALUE: err = nft_value_dump(skb, data, len); break; case NFT_DATA_VERDICT: err = nft_verdict_dump(skb, data); break; default: err = -EINVAL; WARN_ON(1); } nla_nest_end(skb, nest); return err; } EXPORT_SYMBOL_GPL(nft_data_dump); static int nf_tables_init_net(struct net *net) { INIT_LIST_HEAD(&net->nft.af_info); INIT_LIST_HEAD(&net->nft.commit_list); net->nft.base_seq = 1; return 0; } static struct pernet_operations nf_tables_net_ops = { .init = nf_tables_init_net, }; static int __init nf_tables_module_init(void) { int err; info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS, GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto err1; } err = nf_tables_core_module_init(); if (err < 0) goto err2; err = 
nfnetlink_subsys_register(&nf_tables_subsys); if (err < 0) goto err3; pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n"); return register_pernet_subsys(&nf_tables_net_ops); err3: nf_tables_core_module_exit(); err2: kfree(info); err1: return err; } static void __exit nf_tables_module_exit(void) { unregister_pernet_subsys(&nf_tables_net_ops); nfnetlink_subsys_unregister(&nf_tables_subsys); rcu_barrier(); nf_tables_core_module_exit(); kfree(info); } module_init(nf_tables_module_init); module_exit(nf_tables_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
./CrossVul/dataset_final_sorted/CWE-19/c/bad_1487_0
crossvul-cpp_data_bad_5253_0
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/ptp_classify.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/xattr.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> #include <net/busy_poll.h> #include <linux/errqueue.h> #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sysctl_net_busy_read __read_mostly; unsigned int sysctl_net_busy_poll __read_mostly; #endif static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to); static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); 
static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .read_iter = sock_read_iter, .write_iter = sock_write_iter, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. 
 */
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
	/* reject negative or oversized lengths before touching user memory */
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	/* let the audit subsystem record (and sanity-check) the address */
	return audit_sockaddr(ulen, kaddr);
}

/**
 * move_addr_to_user - copy an address to user space
 * @kaddr: kernel space address
 * @klen: length of address in kernel
 * @uaddr: user space address
 * @ulen: pointer to user length field
 *
 * The value pointed to by ulen on entry is the buffer length available.
 * This is overwritten with the buffer space used. -EINVAL is returned
 * if an overlong buffer is specified or a negative buffer size. -EFAULT
 * is returned if either the buffer or the length field are not
 * accessible.
 * After copying the data up to the limit the user specifies, the true
 * length of the data is written over the length limit the user
 * specified. Zero is returned for a success.
 */
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
			     void __user *uaddr, int __user *ulen)
{
	int err;
	int len;

	BUG_ON(klen > sizeof(struct sockaddr_storage));
	err = get_user(len, ulen);
	if (err)
		return err;
	/* never copy more than the kernel-side address actually holds */
	if (len > klen)
		len = klen;
	if (len < 0)
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *      "fromlen shall refer to the value before truncation.."
* 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; wq->flags = 0; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
*/ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", d_inode(dentry)->i_ino); } static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, }; static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "socket:", &sockfs_ops, &sockfs_dentry_operations, SOCKFS_MAGIC); } static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. 
 */
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;

	/* pick a dentry name: explicit caller-supplied name, else the
	 * creating protocol's name */
	if (dname) {
		name.name = dname;
		name.len = strlen(name.name);
	} else if (sock->sk) {
		name.name = sock->sk->sk_prot_creator->name;
		name.len = strlen(name.name);
	}
	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry))
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (IS_ERR(file)) {
		/* drop dentry, keep inode */
		ihold(d_inode(path.dentry));
		path_put(&path);
		return file;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->private_data = sock;
	return file;
}
EXPORT_SYMBOL(sock_alloc_file);

/* Reserve an fd, wrap @sock in a struct file and publish it in the fd
 * table. On failure the reserved fd is released and an errno returned. */
static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {
		fd_install(fd, newfile);
		return fd;
	}

	put_unused_fd(fd);
	return PTR_ERR(newfile);
}

/* Map a struct file back to its socket. Sets *err and returns NULL if
 * the file is not a socket; does not take any extra reference. */
struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}
EXPORT_SYMBOL(sock_from_file);

/**
 * sockfd_lookup - Go from a file number to its socket slot
 * @fd: file handle
 * @err: pointer to an error code return
 *
 * The file handle passed in is locked and the socket it is bound
 * too is returned. If an error occurs the err pointer is overwritten
 * with a negative errno code and NULL is returned. The function checks
 * for both invalid handles and passing a handle which is not a socket.
 *
 * On a success the socket object pointer is returned.
 */
struct socket *sockfd_lookup(int fd, int *err)
{
	struct file *file;
	struct socket *sock;

	file = fget(fd);
	if (!file) {
		*err = -EBADF;
		return NULL;
	}

	sock = sock_from_file(file, err);
	if (!sock)
		fput(file);	/* not a socket: drop the ref fget() took */
	return sock;
}
EXPORT_SYMBOL(sockfd_lookup);

/* Like sockfd_lookup() but based on fdget(); *fput_needed tells the
 * caller whether a matching fput_light() must release a reference. */
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
	struct fd f = fdget(fd);
	struct socket *sock;

	*err = -EBADF;
	if (f.file) {
		sock = sock_from_file(f.file, err);
		if (likely(sock)) {
			*fput_needed = f.flags;
			return sock;
		}
		fdput(f);
	}
	return NULL;
}

#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)

/* Expose the creating protocol's name (stored as the dentry name by
 * sock_alloc_file()) through the "system.sockprotoname" xattr. */
static ssize_t sockfs_getxattr(struct dentry *dentry,
			       const char *name, void *value, size_t size)
{
	const char *proto_name;
	size_t proto_size;
	int error;

	error = -ENODATA;
	if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
		proto_name = dentry->d_name.name;
		proto_size = strlen(proto_name);

		if (value) {
			error = -ERANGE;
			if (proto_size + 1 > size)
				goto out;

			/* proto_size + 1 == strlen + NUL, so the copy is
			 * always NUL-terminated here */
			strncpy(value, proto_name, proto_size + 1);
		}
		error = proto_size + 1;
	}

out:
	return error;
}

static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
				size_t size)
{
	ssize_t len;
	ssize_t used = 0;

	/* security attributes first ... */
	len = security_inode_listsecurity(d_inode(dentry), buffer, size);
	if (len < 0)
		return len;
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		buffer += len;
	}
	/* ... then our own sockprotoname attribute */
	len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
		buffer += len;
	}

	return used;
}

static const struct inode_operations sockfs_inode_ops = {
	.getxattr = sockfs_getxattr,
	.listxattr = sockfs_listxattr,
};

/**
 * sock_alloc - allocate a socket
 *
 * Allocate a new inode and socket object. The two are bound together
 * and initialised. The socket is then returned. If we are out of inodes
 * NULL is returned.
 */

struct socket *sock_alloc(void)
{
	struct inode *inode;
	struct socket *sock;

	inode = new_inode_pseudo(sock_mnt->mnt_sb);
	if (!inode)
		return NULL;

	sock = SOCKET_I(inode);

	kmemcheck_annotate_bitfield(sock, type);
	inode->i_ino = get_next_ino();
	/* sockets are rwx for everyone; access control happens elsewhere */
	inode->i_mode = S_IFSOCK | S_IRWXUGO;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_op = &sockfs_inode_ops;

	/* per-cpu counter of live sockets */
	this_cpu_add(sockets_in_use, 1);
	return sock;
}
EXPORT_SYMBOL(sock_alloc);

/**
 *	sock_release	-	close a socket
 *	@sock: socket to close
 *
 *	The socket is released from the protocol stack if it has a release
 *	callback, and the inode is then released if the socket is bound to
 *	an inode not a file.
 */
void sock_release(struct socket *sock)
{
	if (sock->ops) {
		struct module *owner = sock->ops->owner;

		sock->ops->release(sock);
		sock->ops = NULL;
		/* drop the protocol-module ref taken at create time */
		module_put(owner);
	}

	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
		pr_err("%s: fasync list not empty!\n", __func__);

	this_cpu_sub(sockets_in_use, 1);
	if (!sock->file) {
		iput(SOCK_INODE(sock));
		return;
	}
	sock->file = NULL;
}
EXPORT_SYMBOL(sock_release);

/* Translate the sk's SOF_TIMESTAMPING_TX_* flags into skb tx_flags bits. */
void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
{
	u8 flags = *tx_flags;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
		flags |= SKBTX_HW_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
		flags |= SKBTX_SW_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)
		flags |= SKBTX_SCHED_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
		flags |= SKBTX_ACK_TSTAMP;

	*tx_flags = flags;
}
EXPORT_SYMBOL(__sock_tx_timestamp);

/* Send without the LSM hook; callers must have done the security check. */
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
	int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));

	BUG_ON(ret == -EIOCBQUEUED);
	return ret;
}

int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
	int err = security_socket_sendmsg(sock, msg, msg_data_left(msg));

	return err ?: sock_sendmsg_nosec(sock, msg);
}
EXPORT_SYMBOL(sock_sendmsg);

/* Kernel-space sendmsg: point the msg at a kvec array, then send. */
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size)
{
	iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
	return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);

/*
 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
 */
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
	struct scm_timestamping tss;
	int empty = 1;
	struct skb_shared_hwtstamps *shhwtstamps =
		skb_hwtstamps(skb);

	/* Race occurred between timestamp enabling and packet
	   receiving.  Fill in the current time for now. */
	if (need_software_tstamp && skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	if (need_software_tstamp) {
		if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
			struct timeval tv;
			skb_get_timestamp(skb, &tv);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
				 sizeof(tv), &tv);
		} else {
			struct timespec ts;
			skb_get_timestampns(skb, &ts);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
				 sizeof(ts), &ts);
		}
	}

	memset(&tss, 0, sizeof(tss));
	/* tss.ts[0] = software, tss.ts[2] = raw hardware timestamp */
	if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
		empty = 0;
	if (shhwtstamps &&
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
		empty = 0;
	if (!empty)
		put_cmsg(msg, SOL_SOCKET,
			 SCM_TIMESTAMPING, sizeof(tss), &tss);
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);

/* Deliver the wifi TX ack status as an SCM_WIFI_STATUS cmsg, if enabled. */
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	int ack;

	if (!sock_flag(sk, SOCK_WIFI_STATUS))
		return;
	if (!skb->wifi_acked_valid)
		return;

	ack = skb->wifi_acked;

	put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
}
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);

/* Report the drop count as an SO_RXQ_OVFL cmsg when the flag is set. */
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	sock_recv_timestamp(msg, sk, skb);
	sock_recv_drops(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);

/* Receive without the LSM hook; callers must have done the security check. */
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
				     size_t size, int flags)
{
	return sock->ops->recvmsg(sock, msg, size, flags);
}

int sock_recvmsg(struct socket *sock, struct msghdr *msg,
		 size_t size, int flags)
{
	int err = security_socket_recvmsg(sock, msg, size, flags);

	return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
}
EXPORT_SYMBOL(sock_recvmsg);

/**
 * kernel_recvmsg - Receive a message from a socket (kernel space)
 * @sock:       The socket to receive the message from
 * @msg:        Received message
 * @vec:        Input s/g array for message data
 * @num:        Size of input s/g array
 * @size:       Number of bytes to read
 * @flags:      Message flags (MSG_DONTWAIT, etc...)
 *
 * On return the msg structure contains the scatter/gather array passed in the
 * vec argument. The array is modified so that it consists of the unfilled
 * portion of the original array.
 *
 * The returned value is the total number of bytes received, or an error.
 */
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size, int flags)
{
	mm_segment_t oldfs = get_fs();
	int result;

	iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
	/* widen the uaccess window so the kvec buffers pass access checks */
	set_fs(KERNEL_DS);
	result = sock_recvmsg(sock, msg, size, flags);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_recvmsg);

static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more)
{
	struct socket *sock;
	int flags;

	sock = file->private_data;

	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
	flags |= more;

	return kernel_sendpage(sock, page, offset, size, flags);
}

static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct socket *sock = file->private_data;

	if (unlikely(!sock->ops->splice_read))
		return -EINVAL;

	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}

/* read(2)/readv(2) entry point for socket files, via the iov_iter path. */
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct socket *sock = file->private_data;
	struct msghdr msg = {.msg_iter = *to,
			     .msg_iocb = iocb};
	ssize_t res;

	if (file->f_flags & O_NONBLOCK)
		msg.msg_flags = MSG_DONTWAIT;

	if (iocb->ki_pos != 0)
		return -ESPIPE;

	if (!iov_iter_count(to))	/* Match SYS5 behaviour */
		return 0;

	res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
	*to = msg.msg_iter;
	return res;
}

/* write(2)/writev(2) entry point for socket files, via the iov_iter path. */
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct socket *sock = file->private_data;
	struct msghdr msg = {.msg_iter = *from,
			     .msg_iocb = iocb};
	ssize_t res;

	if (iocb->ki_pos != 0)
		return -ESPIPE;

	if (file->f_flags & O_NONBLOCK)
		msg.msg_flags = MSG_DONTWAIT;

	if (sock->type == SOCK_SEQPACKET)
		msg.msg_flags |= MSG_EOR;

	res = sock_sendmsg(sock, &msg);
	*from = msg.msg_iter;
	return res;
}

/*
 * Atomic setting of ioctl hooks to avoid race
 * with module unload.
 */

static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);

/* Install/remove the bridge ioctl handler under its mutex. */
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

/* Install/remove the 802.1q VLAN ioctl handler under its mutex. */
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

/* Install/remove the DLCI ioctl handler under its mutex. */
void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

/* Ask the protocol first; fall back to the device layer if it declines. */
static long sock_do_ioctl(struct net *net, struct socket *sock,
				 unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}

/*
 *	With an ioctl, arg may well be a user mode pointer, but we don't know
 *	what to do with it - that's up to the protocol still.
 */

static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct socket *sock;
	struct sock *sk;
	void __user *argp = (void __user *)arg;
	int pid, err;
	struct net *net;

	sock = file->private_data;
	sk = sock->sk;
	net = sock_net(sk);
	/* device-private and (optionally) wireless-extension ranges go
	 * straight to the device layer; everything else is dispatched here.
	 */
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
		err = dev_ioctl(net, cmd, argp);
	} else
#ifdef CONFIG_WEXT_CORE
	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
		err = dev_ioctl(net, cmd, argp);
	} else
#endif
		switch (cmd) {
		case FIOSETOWN:
		case SIOCSPGRP:
			err = -EFAULT;
			if (get_user(pid, (int __user *)argp))
				break;
			f_setown(sock->file, pid, 1);
			err = 0;
			break;
		case FIOGETOWN:
		case SIOCGPGRP:
			err = put_user(f_getown(sock->file),
				       (int __user *)argp);
			break;
		case SIOCGIFBR:
		case SIOCSIFBR:
		case SIOCBRADDBR:
		case SIOCBRDELBR:
			err = -ENOPKG;
			/* try to auto-load the bridge module on first use */
			if (!br_ioctl_hook)
				request_module("bridge");

			mutex_lock(&br_ioctl_mutex);
			if (br_ioctl_hook)
				err = br_ioctl_hook(net, cmd, argp);
			mutex_unlock(&br_ioctl_mutex);
			break;
		case SIOCGIFVLAN:
		case SIOCSIFVLAN:
			err = -ENOPKG;
			if (!vlan_ioctl_hook)
				request_module("8021q");

			mutex_lock(&vlan_ioctl_mutex);
			if (vlan_ioctl_hook)
				err = vlan_ioctl_hook(net, argp);
			mutex_unlock(&vlan_ioctl_mutex);
			break;
		case SIOCADDDLCI:
		case SIOCDELDLCI:
			err = -ENOPKG;
			if (!dlci_ioctl_hook)
				request_module("dlci");

			mutex_lock(&dlci_ioctl_mutex);
			if (dlci_ioctl_hook)
				err = dlci_ioctl_hook(cmd, argp);
			mutex_unlock(&dlci_ioctl_mutex);
			break;
		default:
			err = sock_do_ioctl(net, sock, cmd, arg);
			break;
		}
	return err;
}

/*
 * Create a socket object without mapping it to a file descriptor.
 * On success *res holds the new socket; on failure *res is NULL.
 */
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
	int err;
	struct socket *sock = NULL;

	err = security_socket_create(family, type, protocol, 1);
	if (err)
		goto out;

	sock = sock_alloc();
	if (!sock) {
		err = -ENOMEM;
		goto out;
	}

	sock->type = type;
	err = security_socket_post_create(sock, family, type, protocol, 1);
	if (err)
		goto out_release;

out:
	*res = sock;
	return err;
out_release:
	sock_release(sock);
	sock = NULL;
	goto out;
}
EXPORT_SYMBOL(sock_create_lite);

/* No kernel
   lock held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
{
	unsigned int busy_flag = 0;
	struct socket *sock;

	/*
	 *      We can't return errors to poll, so it's either yes or no.
	 */
	sock = file->private_data;

	if (sk_can_busy_loop(sock->sk)) {
		/* this socket can poll_ll so tell the system call */
		busy_flag = POLL_BUSY_LOOP;

		/* once, only if requested by syscall */
		if (wait && (wait->_key & POLL_BUSY_LOOP))
			sk_busy_loop(sock->sk, 1);
	}

	return busy_flag | sock->ops->poll(file, sock, wait);
}

static int sock_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct socket *sock = file->private_data;

	return sock->ops->mmap(file, sock, vma);
}

static int sock_close(struct inode *inode, struct file *filp)
{
	sock_release(SOCKET_I(inode));
	return 0;
}

/*
 *	Update the socket async list
 *
 *	Fasync_list locking strategy.
 *
 *	1. fasync_list is modified only under process context socket lock
 *	   i.e. under semaphore.
 *	2. fasync_list is used under read_lock(&sk->sk_callback_lock)
 *	   or under socket lock
 */

static int sock_fasync(int fd, struct file *filp, int on)
{
	struct socket *sock = filp->private_data;
	struct sock *sk = sock->sk;
	struct socket_wq *wq;

	if (sk == NULL)
		return -EINVAL;

	lock_sock(sk);
	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
	fasync_helper(fd, filp, on, &wq->fasync_list);

	if (!wq->fasync_list)
		sock_reset_flag(sk, SOCK_FASYNC);
	else
		sock_set_flag(sk, SOCK_FASYNC);

	release_sock(sk);
	return 0;
}

/* This function may be called only under rcu_lock */

int sock_wake_async(struct socket_wq *wq, int how, int band)
{
	if (!wq || !wq->fasync_list)
		return -1;

	switch (how) {
	case SOCK_WAKE_WAITD:
		if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
			break;
		goto call_kill;
	case SOCK_WAKE_SPACE:
		if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
			break;
		/* fall through */
	case SOCK_WAKE_IO:
call_kill:
		kill_fasync(&wq->fasync_list, SIGIO, band);
		break;
	case SOCK_WAKE_URG:
		kill_fasync(&wq->fasync_list, SIGURG, band);
	}

	return 0;
}
EXPORT_SYMBOL(sock_wake_async);

/*
 * Core socket creation: validates family/type, consults the LSM,
 * resolves the protocol family (auto-loading its module if needed)
 * and lets it set the socket up.  Careful module refcount dance:
 * pf->owner is pinned across ->create(), then swapped for the
 * refcount on sock->ops->owner that sock_release() will drop.
 */
int __sock_create(struct net *net, int family, int type, int protocol,
			 struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *      Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		pr_info_once("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			     current->comm);
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. if
	 *	the protocol is 0, the family is instructed to select an appropriate
	 *	default.
	 */
	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but its the
				   closest posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol,
			     res, 0);
}
EXPORT_SYMBOL(sock_create);

int sock_create_kern(struct net *net, int family, int type, int protocol,
		     struct socket **res)
{
	return __sock_create(net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.  */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	/* split type argument into the real type and the SOCK_* flag bits */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may be already another descriptor 8) Not kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = get_unused_fd_flags(flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}

	fd2 = get_unused_fd_flags(flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		goto out_put_unused_1;
	}

	newfile1 = sock_alloc_file(sock1, flags, NULL);
	if (IS_ERR(newfile1)) {
		err = PTR_ERR(newfile1);
		goto out_put_unused_both;
	}

	newfile2 = sock_alloc_file(sock2, flags, NULL);
	if (IS_ERR(newfile2)) {
		err = PTR_ERR(newfile2);
		goto out_fput_1;
	}

	/* copy the fds out before publishing them with fd_install() */
	err = put_user(fd1, &usockvec[0]);
	if (err)
		goto out_fput_both;

	err = put_user(fd2, &usockvec[1]);
	if (err)
		goto out_fput_both;

	audit_fd_pair(fd1, fd2);

	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);
	/* fd1 and fd2 may be already another descriptors.
	 * Not kernel problem.
	 */

	return 0;

	/* unwind order matters: once a socket is wrapped in a file,
	 * fput() releases it; otherwise sock_release() must be called.
	 */
out_fput_both:
	fput(newfile2);
	fput(newfile1);
	put_unused_fd(fd2);
	put_unused_fd(fd1);
	goto out;

out_fput_1:
	fput(newfile1);
	put_unused_fd(fd2);
	put_unused_fd(fd1);
	sock_release(sock2);
	goto out;

out_put_unused_both:
	put_unused_fd(fd2);
out_put_unused_1:
	put_unused_fd(fd1);
out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);
out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
* * We move the socket address to kernel space before we call * the protocol layer (having also checked the address is ok). */ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { err = move_addr_to_kernel(umyaddr, addrlen, &address); if (err >= 0) { err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); if (!err) err = sock->ops->bind(sock, (struct sockaddr *) &address, addrlen); } fput_light(sock->file, fput_needed); } return err; } /* * Perform a listen. Basically, we allow the protocol to do anything * necessary for a listen, and if that works, we mark the socket as * ready for listening. */ SYSCALL_DEFINE2(listen, int, fd, int, backlog) { struct socket *sock; int err, fput_needed; int somaxconn; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; if ((unsigned int)backlog > somaxconn) backlog = somaxconn; err = security_socket_listen(sock, backlog); if (!err) err = sock->ops->listen(sock, backlog); fput_light(sock->file, fput_needed); } return err; } /* * For accept, we attempt to create a new socket, set up the link * with the client, wake up the client, then return the new * connected fd. We collect the address of the connector in kernel * space and move it to user at the very end. This is unclean because * we open the socket then return an error. * * 1003.1g adds the ability to recvmsg() to query connection pending * status to recvmsg. We need to add that support in a way thats * clean when we restucture accept also. 
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	/* new socket inherits type and ops from the listener */
	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}
	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
	if (IS_ERR(newfile)) {
		err = PTR_ERR(newfile);
		put_unused_fd(newfd);
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user(&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike another OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	/* fput() also releases newsock, which the file now owns */
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.  The address
 *	is in user space so we verify it is OK and move it to kernel space.
* * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to * break bindings * * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and * other SEQPACKET protocols that take time to connect() as it doesn't * include the -EINPROGRESS status for such sockets. */ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = move_addr_to_kernel(uservaddr, addrlen, &address); if (err < 0) goto out_put; err = security_socket_connect(sock, (struct sockaddr *)&address, addrlen); if (err) goto out_put; err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the local address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = security_socket_getsockname(sock); if (err) goto out_put; err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); if (err) goto out_put; err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the remote address ('name') of a socket object. Move the obtained * name to user space. 
*/ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getpeername(sock); if (err) { fput_light(sock->file, fput_needed); return err; } err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1); if (!err) err = move_addr_to_user(&address, len, usockaddr, usockaddr_len); fput_light(sock->file, fput_needed); } return err; } /* * Send a datagram to a given address. We move the address into kernel * space and check the user space data area is readable before invoking * the protocol. */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter); if (unlikely(err)) return err; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_name = NULL; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, &address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Send a datagram down a socket. */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, unsigned int, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } /* * Receive a frame from the socket and optionally record the address of the * sender. We verify the buffers are writable and if needed move the * sender address from kernel to user space. 
*/ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter); if (unlikely(err)) return err; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; /* Save some cycles and don't copy the address if not needed */ msg.msg_name = addr ? (struct sockaddr *)&address : NULL; /* We assume all kernel code knows the size of sockaddr_storage */ msg.msg_namelen = 0; msg.msg_iocb = NULL; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; } /* * Receive a datagram from a socket. */ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, char __user *, optval, int, optlen) { int err, fput_needed; struct socket *sock; if (optlen < 0) return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_setsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, optlen); else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Get a socket option. 
Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, char __user *, optval, int __user *, optlen) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Shutdown a socket. */ SYSCALL_DEFINE2(shutdown, int, fd, int, how) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_shutdown(sock, how); if (!err) err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } return err; } /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. */ #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) struct used_address { struct sockaddr_storage name; unsigned int name_len; }; static int copy_msghdr_from_user(struct msghdr *kmsg, struct user_msghdr __user *umsg, struct sockaddr __user **save_addr, struct iovec **iov) { struct sockaddr __user *uaddr; struct iovec __user *uiov; size_t nr_segs; ssize_t err; if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || __get_user(uaddr, &umsg->msg_name) || __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || __get_user(uiov, &umsg->msg_iov) || __get_user(nr_segs, &umsg->msg_iovlen) || __get_user(kmsg->msg_control, &umsg->msg_control) || __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; if (!uaddr) kmsg->msg_namelen = 0; if (kmsg->msg_namelen < 0) return -EINVAL; if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); if (save_addr) *save_addr = uaddr; if (uaddr && kmsg->msg_namelen) { if (!save_addr) { err = move_addr_to_kernel(uaddr, kmsg->msg_namelen, kmsg->msg_name); if (err < 0) return err; } } else { kmsg->msg_name = NULL; kmsg->msg_namelen = 0; } if (nr_segs > UIO_MAXIOV) return -EMSGSIZE; kmsg->msg_iocb = NULL; return import_iovec(save_addr ? 
READ : WRITE, uiov, nr_segs, UIO_FASTIOV, iov, &kmsg->msg_iter); } static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address, unsigned int allowed_msghdr_flags) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct sockaddr_storage address; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; int ctl_len; ssize_t err; msg_sys->msg_name = &address; if (MSG_CMSG_COMPAT & flags) err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); else err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); if (err < 0) return err; err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) goto out_freeiov; flags |= (msg_sys->msg_flags & allowed_msghdr_flags); ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { err = cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) goto out_freeiov; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) goto out_freeiov; } err = -EFAULT; /* * Careful! Before this, msg_sys->msg_control contains a user pointer. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted * checking falls down on this. */ if (copy_from_user(ctl_buf, (void __user __force *)msg_sys->msg_control, ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; } msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) msg_sys->msg_flags |= MSG_DONTWAIT; /* * If this is sendmmsg() and current destination address is same as * previously succeeded address, omit asking LSM's decision. 
* used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ if (used_address && msg_sys->msg_name && used_address->name_len == msg_sys->msg_namelen && !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys); goto out_freectl; } err = sock_sendmsg(sock, msg_sys); /* * If this is sendmmsg() and sending to current destination address was * successful, remember it. */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; if (msg_sys->msg_name) memcpy(&used_address->name, msg_sys->msg_name, used_address->name_len); } out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: kfree(iov); return err; } /* * BSD sendmsg interface */ long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmsg(fd, msg, flags); } /* * Linux sendmmsg interface */ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct used_address used_address; unsigned int oflags = flags; if (vlen > UIO_MAXIOV) vlen = UIO_MAXIOV; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; used_address.name_len = UINT_MAX; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; err = 0; flags |= MSG_BATCH; while (datagrams < vlen) { if (datagrams == vlen - 1) flags = oflags; if (MSG_CMSG_COMPAT 
& flags) { err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags, &used_address, MSG_EOR); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_sendmsg(sock, (struct user_msghdr __user *)entry, &msg_sys, flags, &used_address, MSG_EOR); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; cond_resched(); } fput_light(sock->file, fput_needed); /* We only return an error if no datagrams were able to be sent */ if (datagrams != 0) return datagrams; return err; } SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int total_len, len; ssize_t err; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len = COMPAT_NAMELEN(msg); msg_sys->msg_name = &addr; if (MSG_CMSG_COMPAT & flags) err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); else err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); if (err < 0) return err; total_len = iov_iter_count(&msg_sys->msg_iter); cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); /* We assume all kernel code knows the size of sockaddr_storage */ msg_sys->msg_namelen = 0; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? 
sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user(&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: kfree(iov); return err; } /* * BSD recvmsg interface */ long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_recvmsg(fd, msg, flags); } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. 
*/ if (MSG_CMSG_COMPAT & flags) { err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_recvmsg(sock, (struct user_msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; cond_resched(); } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[AUDITSC_ARGS]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. 
*/ if (copy_from_user(a, args, len)) return -EFAULT; err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); if (err) return err; a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; 
break; } return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a protocol handler that wants to * advertise its address family, and have it linked into the * socket interface. The value ops->family corresponds to the * socket system call protocol family. */ int sock_register(const struct net_proto_family *ops) { int err; if (ops->family >= NPROTO) { pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO); return -ENOBUFS; } spin_lock(&net_family_lock); if (rcu_dereference_protected(net_families[ops->family], lockdep_is_held(&net_family_lock))) err = -EEXIST; else { rcu_assign_pointer(net_families[ops->family], ops); err = 0; } spin_unlock(&net_family_lock); pr_info("NET: Registered protocol family %d\n", ops->family); return err; } EXPORT_SYMBOL(sock_register); /** * sock_unregister - remove a protocol handler * @family: protocol family to remove * * This function is called by a protocol handler that wants to * remove its address family, and have it unlinked from the * new socket creation. * * If protocol handler is a module, then it can use module reference * counts to protect against new references. If protocol handler is not * a module then it needs to provide its own protection in * the ops->create routine. */ void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); RCU_INIT_POINTER(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); pr_info("NET: Unregistered protocol family %d\n", family); } EXPORT_SYMBOL(sock_unregister); static int __init sock_init(void) { int err; /* * Initialize the network sysctl infrastructure. */ err = net_sysctl_init(); if (err) goto out; /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. 
*/ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER err = netfilter_init(); if (err) goto out; #endif ptp_classifier_init(); out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; } core_initcall(sock_init); /* early initcall */ #ifdef CONFIG_PROC_FS void socket_seq_show(struct seq_file *seq) { int cpu; int counter = 0; for_each_possible_cpu(cpu) counter += per_cpu(sockets_in_use, cpu); /* It can be negative, by the way. 8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, void __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf ifc32; struct ifconf ifc; struct ifconf __user 
*uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. 
*/ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void __user *)(&rxnfc->fs.m_ext + 1) - (void __user *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void __user *)(&rxnfc->fs.location + 1) - (void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void __user *)(&rxnfc->fs.m_ext + 1) - (const void __user *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void __user *)(&rxnfc->fs.location + 1) - (const void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; mm_segment_t old_fs; int err; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; default: return -ENOIOCTLCMD; } } /* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int 
dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct 
sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! */ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= get_user(r4.rt_window, &(ur4->rt_window)); ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); 
r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return compat_ifr_data_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCSHWTSTAMP: case SIOCGHWTSTAMP: return compat_ifr_data_ioctl(net, cmd, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case 
SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } 
EXPORT_SYMBOL(kernel_accept);

/*
 * kernel_connect - connect a kernel-space socket to an address.
 * Thin wrapper that dispatches straight to the protocol's ->connect op;
 * @addr/@addrlen/@flags are passed through unchanged.
 */
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags)
{
	return sock->ops->connect(sock, addr, addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);

/*
 * kernel_getsockname - fetch the socket's local address.
 * Dispatches to ->getname with peer == 0 (local end).
 */
int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 0);
}
EXPORT_SYMBOL(kernel_getsockname);

/*
 * kernel_getpeername - fetch the remote peer's address.
 * Dispatches to ->getname with peer == 1 (remote end).
 */
int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
			 int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 1);
}
EXPORT_SYMBOL(kernel_getpeername);

/*
 * kernel_getsockopt - getsockopt() for kernel-space buffers.
 *
 * The underlying getsockopt implementations expect __user pointers, so
 * this temporarily widens the address limit with set_fs(KERNEL_DS) and
 * force-casts the kernel buffers to __user pointers.  SOL_SOCKET options
 * go to the generic sock_getsockopt(); everything else to the protocol's
 * ->getsockopt.  The original fs is always restored before returning.
 */
int kernel_getsockopt(struct socket *sock, int level, int optname,
			char *optval, int *optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int __user *uoptlen;
	int err;

	uoptval = (char __user __force *) optval;
	uoptlen = (int __user __force *) optlen;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
	else
		err = sock->ops->getsockopt(sock, level, optname, uoptval,
					    uoptlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_getsockopt);

/*
 * kernel_setsockopt - setsockopt() for kernel-space buffers.
 * Same set_fs(KERNEL_DS) dance as kernel_getsockopt(), write direction.
 */
int kernel_setsockopt(struct socket *sock, int level, int optname,
			char *optval, unsigned int optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *) optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_setsockopt);

/*
 * kernel_sendpage - zero-copy page send for kernel callers.
 * Uses the protocol's ->sendpage when available, otherwise falls back
 * to the generic copying implementation sock_no_sendpage().
 */
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
		    size_t size, int flags)
{
	if (sock->ops->sendpage)
		return sock->ops->sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(kernel_sendpage);

/*
 * kernel_sock_ioctl - run a socket ioctl on behalf of kernel code.
 * Widens the address limit so @arg may point into kernel memory, then
 * forwards to the protocol's ->ioctl and restores the original fs.
 */
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, cmd, arg);
	set_fs(oldfs);

	return err;
}
EXPORT_SYMBOL(kernel_sock_ioctl);

/*
 * kernel_sock_shutdown - shut down one or both directions of a socket.
 * @how: SHUT_RD / SHUT_WR / SHUT_RDWR selector (enum sock_shutdown_cmd).
 * Pure dispatch to the protocol's ->shutdown op.
 */
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
	return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
./CrossVul/dataset_final_sorted/CWE-19/c/bad_5253_0
crossvul-cpp_data_bad_1843_1
/* * linux/fs/ext2/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/random.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/vfs.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/log2.h> #include <linux/quotaops.h> #include <asm/uaccess.h> #include "ext2.h" #include "xattr.h" #include "acl.h" static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es, int wait); static int ext2_remount (struct super_block * sb, int * flags, char * data); static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext2_sync_fs(struct super_block *sb, int wait); static int ext2_freeze(struct super_block *sb); static int ext2_unfreeze(struct super_block *sb); void ext2_error(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; struct ext2_sb_info *sbi = EXT2_SB(sb); struct ext2_super_block *es = sbi->s_es; if (!(sb->s_flags & MS_RDONLY)) { spin_lock(&sbi->s_lock); sbi->s_mount_state |= EXT2_ERROR_FS; es->s_state |= cpu_to_le16(EXT2_ERROR_FS); spin_unlock(&sbi->s_lock); ext2_sync_super(sb, es, 1); } va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT2-fs: panic from previous error\n"); if (test_opt(sb, ERRORS_RO)) { ext2_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } } void ext2_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* * This must be called with sbi->s_lock held. */ void ext2_update_dynamic_rev(struct super_block *sb) { struct ext2_super_block *es = EXT2_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV) return; ext2_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT2_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. 
*/ } static void ext2_put_super (struct super_block * sb) { int db_count; int i; struct ext2_sb_info *sbi = EXT2_SB(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext2_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { struct ext2_super_block *es = sbi->s_es; spin_lock(&sbi->s_lock); es->s_state = cpu_to_le16(sbi->s_mount_state); spin_unlock(&sbi->s_lock); ext2_sync_super(sb, es, 1); } db_count = sbi->s_gdb_count; for (i = 0; i < db_count; i++) if (sbi->s_group_desc[i]) brelse (sbi->s_group_desc[i]); kfree(sbi->s_group_desc); kfree(sbi->s_debts); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); brelse (sbi->s_sbh); sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache * ext2_inode_cachep; static struct inode *ext2_alloc_inode(struct super_block *sb) { struct ext2_inode_info *ei; ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; #ifdef CONFIG_QUOTA memset(&ei->i_dquot, 0, sizeof(ei->i_dquot)); #endif return &ei->vfs_inode; } static void ext2_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); } static void ext2_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, ext2_i_callback); } static void init_once(void *foo) { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; rwlock_init(&ei->i_meta_lock); #ifdef CONFIG_EXT2_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); #ifdef CONFIG_FS_DAX init_rwsem(&ei->dax_sem); #endif inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { ext2_inode_cachep = kmem_cache_create("ext2_inode_cache", sizeof(struct ext2_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (ext2_inode_cachep == 
        NULL)
        return -ENOMEM;
    return 0;
}

static void destroy_inodecache(void)
{
    /*
     * Make sure all delayed rcu free inodes are flushed before we
     * destroy cache.
     */
    rcu_barrier();
    kmem_cache_destroy(ext2_inode_cachep);
}

/*
 * Emit the active mount options for /proc/mounts (VFS ->show_options).
 * Options matching the on-disk defaults are suppressed where possible.
 */
static int ext2_show_options(struct seq_file *seq, struct dentry *root)
{
    struct super_block *sb = root->d_sb;
    struct ext2_sb_info *sbi = EXT2_SB(sb);
    struct ext2_super_block *es = sbi->s_es;
    unsigned long def_mount_opts;

    spin_lock(&sbi->s_lock);
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

    if (sbi->s_sb_block != 1)
        seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
    if (test_opt(sb, MINIX_DF))
        seq_puts(seq, ",minixdf");
    if (test_opt(sb, GRPID))
        seq_puts(seq, ",grpid");
    if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
        seq_puts(seq, ",nogrpid");
    if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT2_DEF_RESUID)) ||
        le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
        seq_printf(seq, ",resuid=%u",
                   from_kuid_munged(&init_user_ns, sbi->s_resuid));
    }
    if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT2_DEF_RESGID)) ||
        le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
        seq_printf(seq, ",resgid=%u",
                   from_kgid_munged(&init_user_ns, sbi->s_resgid));
    }
    if (test_opt(sb, ERRORS_RO)) {
        int def_errors = le16_to_cpu(es->s_errors);

        /* Only shown when it differs from the on-disk default. */
        if (def_errors == EXT2_ERRORS_PANIC ||
            def_errors == EXT2_ERRORS_CONTINUE) {
            seq_puts(seq, ",errors=remount-ro");
        }
    }
    if (test_opt(sb, ERRORS_CONT))
        seq_puts(seq, ",errors=continue");
    if (test_opt(sb, ERRORS_PANIC))
        seq_puts(seq, ",errors=panic");
    if (test_opt(sb, NO_UID32))
        seq_puts(seq, ",nouid32");
    if (test_opt(sb, DEBUG))
        seq_puts(seq, ",debug");
    if (test_opt(sb, OLDALLOC))
        seq_puts(seq, ",oldalloc");

#ifdef CONFIG_EXT2_FS_XATTR
    if (test_opt(sb, XATTR_USER))
        seq_puts(seq, ",user_xattr");
    if (!test_opt(sb, XATTR_USER) &&
        (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
        seq_puts(seq, ",nouser_xattr");
    }
#endif

#ifdef CONFIG_EXT2_FS_POSIX_ACL
    if (test_opt(sb, POSIX_ACL))
        seq_puts(seq, ",acl");
    if (!test_opt(sb,
        POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
        seq_puts(seq, ",noacl");
#endif

    if (test_opt(sb, NOBH))
        seq_puts(seq, ",nobh");

#if defined(CONFIG_QUOTA)
    if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
        seq_puts(seq, ",usrquota");

    if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
        seq_puts(seq, ",grpquota");
#endif

#ifdef CONFIG_FS_DAX
    if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
        seq_puts(seq, ",xip");
    if (sbi->s_mount_opt & EXT2_MOUNT_DAX)
        seq_puts(seq, ",dax");
#endif

    if (!test_opt(sb, RESERVATION))
        seq_puts(seq, ",noreservation");

    spin_unlock(&sbi->s_lock);
    return 0;
}

#ifdef CONFIG_QUOTA
/* Raw quota-file I/O helpers, defined near the end of this file. */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
static struct dquot **ext2_get_dquots(struct inode *inode)
{
    return EXT2_I(inode)->i_dquot;
}
#endif

/* Superblock operations table wired into the VFS. */
static const struct super_operations ext2_sops = {
    .alloc_inode    = ext2_alloc_inode,
    .destroy_inode  = ext2_destroy_inode,
    .write_inode    = ext2_write_inode,
    .evict_inode    = ext2_evict_inode,
    .put_super      = ext2_put_super,
    .sync_fs        = ext2_sync_fs,
    .freeze_fs      = ext2_freeze,
    .unfreeze_fs    = ext2_unfreeze,
    .statfs         = ext2_statfs,
    .remount_fs     = ext2_remount,
    .show_options   = ext2_show_options,
#ifdef CONFIG_QUOTA
    .quota_read     = ext2_quota_read,
    .quota_write    = ext2_quota_write,
    .get_dquots     = ext2_get_dquots,
#endif
};

/*
 * Look up an inode for NFS file-handle decoding; rejects inode numbers
 * outside the valid on-disk range and stale generations.
 */
static struct inode *ext2_nfs_get_inode(struct super_block *sb,
        u64 ino, u32 generation)
{
    struct inode *inode;

    if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
        return ERR_PTR(-ESTALE);
    if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
        return ERR_PTR(-ESTALE);

    /*
     * ext2_iget isn't quite right if the inode is currently unallocated!
     * However ext2_iget currently does appropriate checks to handle stale
     * inodes so everything is OK.
     */
    inode = ext2_iget(sb, ino);
    if (IS_ERR(inode))
        return ERR_CAST(inode);
    if (generation && inode->i_generation != generation) {
        /* we didn't find the right inode.. */
        iput(inode);
        return ERR_PTR(-ESTALE);
    }
    return inode;
}

/* NFS export: decode a file handle into a dentry. */
static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
        int fh_len, int fh_type)
{
    return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                    ext2_nfs_get_inode);
}

/* NFS export: decode a file handle into the parent's dentry. */
static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
        int fh_len, int fh_type)
{
    return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                    ext2_nfs_get_inode);
}

static const struct export_operations ext2_export_ops = {
    .fh_to_dentry = ext2_fh_to_dentry,
    .fh_to_parent = ext2_fh_to_parent,
    .get_parent = ext2_get_parent,
};

/*
 * Peel a leading "sb=N" off the mount-option string (advancing *data past
 * it) and return N; returns 1, the default superblock location, otherwise.
 */
static unsigned long get_sb_block(void **data)
{
    unsigned long sb_block;
    char *options = (char *) *data;

    if (!options || strncmp(options, "sb=", 3) != 0)
        return 1;   /* Default location */
    options += 3;
    sb_block = simple_strtoul(options, &options, 0);
    if (*options && *options != ',') {
        printk("EXT2-fs: Invalid sb specification: %s\n",
               (char *) *data);
        return 1;
    }
    if (*options == ',')
        options++;
    *data = (void *) options;
    return sb_block;
}

/* Mount-option token identifiers for match_token(). */
enum {
    Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
    Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
    Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
    Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
    Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
    Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
};

/* Token-to-pattern table consumed by match_token(). */
static const match_table_t tokens = {
    {Opt_bsd_df, "bsddf"},
    {Opt_minix_df, "minixdf"},
    {Opt_grpid, "grpid"},
    {Opt_grpid, "bsdgroups"},
    {Opt_nogrpid, "nogrpid"},
    {Opt_nogrpid, "sysvgroups"},
    {Opt_resgid, "resgid=%u"},
    {Opt_resuid, "resuid=%u"},
    {Opt_sb, "sb=%u"},
    {Opt_err_cont, "errors=continue"},
    {Opt_err_panic, "errors=panic"},
    {Opt_err_ro, "errors=remount-ro"},
    {Opt_nouid32, "nouid32"},
    {Opt_nocheck, "check=none"},
    {Opt_nocheck, "nocheck"},
    {Opt_debug, "debug"},
    {Opt_oldalloc, "oldalloc"},
    {Opt_orlov, "orlov"},
    {Opt_nobh, "nobh"},
    {Opt_user_xattr, "user_xattr"},
    {Opt_nouser_xattr, "nouser_xattr"},
    {Opt_acl, "acl"},
    {Opt_noacl, "noacl"},
    {Opt_xip, "xip"},
    {Opt_dax, "dax"},
    {Opt_grpquota, "grpquota"},
    {Opt_ignore, "noquota"},
    {Opt_quota, "quota"},
    {Opt_usrquota, "usrquota"},
    {Opt_reservation, "reservation"},
    {Opt_noreservation, "noreservation"},
    {Opt_err, NULL}
};

/*
 * Parse the comma-separated mount-option string into sbi->s_mount_opt and
 * related fields.  Returns 1 on success, 0 on any invalid option/value.
 * Called both at mount and at remount time.
 */
static int parse_options(char *options, struct super_block *sb)
{
    char *p;
    struct ext2_sb_info *sbi = EXT2_SB(sb);
    substring_t args[MAX_OPT_ARGS];
    int option;
    kuid_t uid;
    kgid_t gid;

    if (!options)
        return 1;

    while ((p = strsep (&options, ",")) != NULL) {
        int token;
        if (!*p)
            continue;

        token = match_token(p, tokens, args);
        switch (token) {
        case Opt_bsd_df:
            clear_opt (sbi->s_mount_opt, MINIX_DF);
            break;
        case Opt_minix_df:
            set_opt (sbi->s_mount_opt, MINIX_DF);
            break;
        case Opt_grpid:
            set_opt (sbi->s_mount_opt, GRPID);
            break;
        case Opt_nogrpid:
            clear_opt (sbi->s_mount_opt, GRPID);
            break;
        case Opt_resuid:
            if (match_int(&args[0], &option))
                return 0;
            uid = make_kuid(current_user_ns(), option);
            if (!uid_valid(uid)) {
                ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
                return 0;

            }
            sbi->s_resuid = uid;
            break;
        case Opt_resgid:
            if (match_int(&args[0], &option))
                return 0;
            gid = make_kgid(current_user_ns(), option);
            if (!gid_valid(gid)) {
                ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
                return 0;
            }
            sbi->s_resgid = gid;
            break;
        case Opt_sb:
            /* handled by get_sb_block() instead of here */
            /* *sb_block = match_int(&args[0]); */
            break;
        case Opt_err_panic:
            clear_opt (sbi->s_mount_opt, ERRORS_CONT);
            clear_opt (sbi->s_mount_opt, ERRORS_RO);
            set_opt (sbi->s_mount_opt, ERRORS_PANIC);
            break;
        case Opt_err_ro:
            clear_opt (sbi->s_mount_opt, ERRORS_CONT);
            clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
            set_opt (sbi->s_mount_opt, ERRORS_RO);
            break;
        case Opt_err_cont:
            clear_opt (sbi->s_mount_opt, ERRORS_RO);
            clear_opt (sbi->s_mount_opt,
ERRORS_PANIC); set_opt (sbi->s_mount_opt, ERRORS_CONT); break; case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; case Opt_debug: set_opt (sbi->s_mount_opt, DEBUG); break; case Opt_oldalloc: set_opt (sbi->s_mount_opt, OLDALLOC); break; case Opt_orlov: clear_opt (sbi->s_mount_opt, OLDALLOC); break; case Opt_nobh: set_opt (sbi->s_mount_opt, NOBH); break; #ifdef CONFIG_EXT2_FS_XATTR case Opt_user_xattr: set_opt (sbi->s_mount_opt, XATTR_USER); break; case Opt_nouser_xattr: clear_opt (sbi->s_mount_opt, XATTR_USER); break; #else case Opt_user_xattr: case Opt_nouser_xattr: ext2_msg(sb, KERN_INFO, "(no)user_xattr options" "not supported"); break; #endif #ifdef CONFIG_EXT2_FS_POSIX_ACL case Opt_acl: set_opt(sbi->s_mount_opt, POSIX_ACL); break; case Opt_noacl: clear_opt(sbi->s_mount_opt, POSIX_ACL); break; #else case Opt_acl: case Opt_noacl: ext2_msg(sb, KERN_INFO, "(no)acl options not supported"); break; #endif case Opt_xip: ext2_msg(sb, KERN_INFO, "use dax instead of xip"); set_opt(sbi->s_mount_opt, XIP); /* Fall through */ case Opt_dax: #ifdef CONFIG_FS_DAX ext2_msg(sb, KERN_WARNING, "DAX enabled. 
Warning: EXPERIMENTAL, use at your own risk"); set_opt(sbi->s_mount_opt, DAX); #else ext2_msg(sb, KERN_INFO, "dax option not supported"); #endif break; #if defined(CONFIG_QUOTA) case Opt_quota: case Opt_usrquota: set_opt(sbi->s_mount_opt, USRQUOTA); break; case Opt_grpquota: set_opt(sbi->s_mount_opt, GRPQUOTA); break; #else case Opt_quota: case Opt_usrquota: case Opt_grpquota: ext2_msg(sb, KERN_INFO, "quota operations not supported"); break; #endif case Opt_reservation: set_opt(sbi->s_mount_opt, RESERVATION); ext2_msg(sb, KERN_INFO, "reservations ON"); break; case Opt_noreservation: clear_opt(sbi->s_mount_opt, RESERVATION); ext2_msg(sb, KERN_INFO, "reservations OFF"); break; case Opt_ignore: break; default: return 0; } } return 1; } static int ext2_setup_super (struct super_block * sb, struct ext2_super_block * es, int read_only) { int res = 0; struct ext2_sb_info *sbi = EXT2_SB(sb); if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) { ext2_msg(sb, KERN_ERR, "error: revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) return res; if (!(sbi->s_mount_state & EXT2_VALID_FS)) ext2_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT2_ERROR_FS)) ext2_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) ext2_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext2_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); if (!le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); if (test_opt (sb, DEBUG)) 
        ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
            "bpg=%lu, ipg=%lu, mo=%04lx]",
            EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
            sbi->s_frag_size,
            sbi->s_groups_count,
            EXT2_BLOCKS_PER_GROUP(sb),
            EXT2_INODES_PER_GROUP(sb),
            sbi->s_mount_opt);
    return res;
}

/*
 * Validate every group descriptor: its block bitmap, inode bitmap and
 * inode table must all lie within the group's own block range.
 * Returns 1 when all descriptors are sane, 0 on the first corruption.
 */
static int ext2_check_descriptors(struct super_block *sb)
{
    int i;
    struct ext2_sb_info *sbi = EXT2_SB(sb);

    ext2_debug ("Checking group descriptors");

    for (i = 0; i < sbi->s_groups_count; i++) {
        struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
        ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
        ext2_fsblk_t last_block;

        /* The final group may be shorter than a full group. */
        if (i == sbi->s_groups_count - 1)
            last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
        else
            last_block = first_block +
                (EXT2_BLOCKS_PER_GROUP(sb) - 1);

        if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
            le32_to_cpu(gdp->bg_block_bitmap) > last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                    "Block bitmap for group %d"
                    " not in group (block %lu)!",
                    i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
            return 0;
        }
        if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
            le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                    "Inode bitmap for group %d"
                    " not in group (block %lu)!",
                    i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
            return 0;
        }
        if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
            le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
            last_block)
        {
            ext2_error (sb, "ext2_check_descriptors",
                    "Inode table for group %d"
                    " not in group (block %lu)!",
                    i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
            return 0;
        }
    }
    return 1;
}

/*
 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^32 sector limit.
*/ static loff_t ext2_max_size(int bits) { loff_t res = EXT2_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a * dense, file such that the total number of * sectors in the file, including data and all indirect blocks, * does not exceed 2^32 -1 * __u32 i_blocks representing the total number of * 512 bytes blocks of the file */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (bits - 9); /* indirect blocks */ meta_blocks = 1; /* double indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)); /* tripple indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); upper_limit -= meta_blocks; upper_limit <<= bits; res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); res <<= bits; if (res > upper_limit) res = upper_limit; if (res > MAX_LFS_FILESIZE) res = MAX_LFS_FILESIZE; return res; } static unsigned long descriptor_loc(struct super_block *sb, unsigned long logic_sb_block, int nr) { struct ext2_sb_info *sbi = EXT2_SB(sb); unsigned long bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) || nr < first_meta_bg) return (logic_sb_block + nr + 1); bg = sbi->s_desc_per_block * nr; if (ext2_bg_has_super(sb, bg)) has_super = 1; return ext2_group_first_block_no(sb, bg) + has_super; } static int ext2_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head * bh; struct ext2_sb_info * sbi; struct ext2_super_block * es; struct inode *root; unsigned long block; unsigned long sb_block = get_sb_block(&data); unsigned long logic_sb_block; unsigned long offset = 0; unsigned long def_mount_opts; long ret = -EINVAL; int blocksize = BLOCK_SIZE; int db_count; int i, j; __le32 features; int err; err = -ENOMEM; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto failed; sbi->s_blockgroup_lock = 
        kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
    if (!sbi->s_blockgroup_lock) {
        kfree(sbi);
        goto failed;
    }
    sb->s_fs_info = sbi;
    sbi->s_sb_block = sb_block;
    spin_lock_init(&sbi->s_lock);

    /*
     * See what the current blocksize for the device is, and
     * use that as the blocksize.  Otherwise (or if the blocksize
     * is smaller than the default) use the default.
     * This is important for devices that have a hardware
     * sectorsize that is larger than the default.
     */
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    if (!blocksize) {
        ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
        goto failed_sbi;
    }

    /*
     * If the superblock doesn't start on a hardware sector boundary,
     * calculate the offset.
     */
    if (blocksize != BLOCK_SIZE) {
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
    } else {
        logic_sb_block = sb_block;
    }

    if (!(bh = sb_bread(sb, logic_sb_block))) {
        ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
        goto failed_sbi;
    }
    /*
     * Note: s_es must be initialized as soon as possible because
     *       some ext2 macro-instructions depend on its value
     */
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    sb->s_magic = le16_to_cpu(es->s_magic);

    if (sb->s_magic != EXT2_SUPER_MAGIC)
        goto cantfind_ext2;

    /* Set defaults before we parse the mount options */
    def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
    if (def_mount_opts & EXT2_DEFM_DEBUG)
        set_opt(sbi->s_mount_opt, DEBUG);
    if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
        set_opt(sbi->s_mount_opt, GRPID);
    if (def_mount_opts & EXT2_DEFM_UID16)
        set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
    if (def_mount_opts & EXT2_DEFM_XATTR_USER)
        set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
    if (def_mount_opts & EXT2_DEFM_ACL)
        set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif

    if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
        set_opt(sbi->s_mount_opt, ERRORS_PANIC);
    else if (le16_to_cpu(sbi->s_es->s_errors) ==
         EXT2_ERRORS_CONTINUE)
        set_opt(sbi->s_mount_opt, ERRORS_CONT);
    else
        set_opt(sbi->s_mount_opt, ERRORS_RO);

    sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
    sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));

    set_opt(sbi->s_mount_opt, RESERVATION);

    if (!parse_options((char *) data, sb))
        goto failed_mount;

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
        ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
         MS_POSIXACL : 0);
    sb->s_iflags |= SB_I_CGROUPWB;

    if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
        (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
         EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
         EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
        ext2_msg(sb, KERN_WARNING,
            "warning: feature flags set on rev 0 fs, "
            "running e2fsck is recommended");
    /*
     * Check feature flags regardless of the revision level, since we
     * previously didn't change the revision level when setting the flags,
     * so there is a chance incompat flags are set on a rev 0 filesystem.
     */
    features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
    if (features) {
        ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
               "unsupported optional features (%x)",
            le32_to_cpu(features));
        goto failed_mount;
    }
    if (!(sb->s_flags & MS_RDONLY) &&
        (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
        ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
               "unsupported optional features (%x)",
               le32_to_cpu(features));
        goto failed_mount;
    }

    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

    /* DAX requires the fs blocksize to equal the page size. */
    if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
        if (blocksize != PAGE_SIZE) {
            ext2_msg(sb, KERN_ERR,
                    "error: unsupported blocksize for dax");
            goto failed_mount;
        }
        if (!sb->s_bdev->bd_disk->fops->direct_access) {
            ext2_msg(sb, KERN_ERR,
                    "error: device does not support dax");
            goto failed_mount;
        }
    }

    /* If the blocksize doesn't match, re-read the thing..
*/ if (sb->s_blocksize != blocksize) { brelse(bh); if (!sb_set_blocksize(sb, blocksize)) { ext2_msg(sb, KERN_ERR, "error: bad blocksize %d", blocksize); goto failed_sbi; } logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize; offset = (sb_block*BLOCK_SIZE) % blocksize; bh = sb_bread(sb, logic_sb_block); if(!bh) { ext2_msg(sb, KERN_ERR, "error: couldn't read" "superblock on 2nd try"); goto failed_sbi; } es = (struct ext2_super_block *) (((char *)bh->b_data) + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) { ext2_msg(sb, KERN_ERR, "error: magic mismatch"); goto failed_mount; } } sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits); sb->s_max_links = EXT2_LINK_MAX; if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) { sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) || !is_power_of_2(sbi->s_inode_size) || (sbi->s_inode_size > blocksize)) { ext2_msg(sb, KERN_ERR, "error: unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } } sbi->s_frag_size = EXT2_MIN_FRAG_SIZE << le32_to_cpu(es->s_log_frag_size); if (sbi->s_frag_size == 0) goto cantfind_ext2; sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT2_INODE_SIZE(sb) == 0) goto cantfind_ext2; sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0) goto cantfind_ext2; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = sb->s_blocksize / sizeof (struct ext2_group_desc); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = 
ilog2 (EXT2_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2 (EXT2_DESC_PER_BLOCK(sb)); if (sb->s_magic != EXT2_SUPER_MAGIC) goto cantfind_ext2; if (sb->s_blocksize != bh->b_size) { if (!silent) ext2_msg(sb, KERN_ERR, "error: unsupported blocksize"); goto failed_mount; } if (sb->s_blocksize != sbi->s_frag_size) { ext2_msg(sb, KERN_ERR, "error: fragsize %lu != blocksize %lu" "(not supported yet)", sbi->s_frag_size, sb->s_blocksize); goto failed_mount; } if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, "error: #blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } if (sbi->s_frags_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, "error: #fragments per group too big: %lu", sbi->s_frags_per_group); goto failed_mount; } if (sbi->s_inodes_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } if (EXT2_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext2; sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT2_BLOCKS_PER_GROUP(sb)) + 1; db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / EXT2_DESC_PER_BLOCK(sb); sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext2_msg(sb, KERN_ERR, "error: not enough memory"); goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL); if (!sbi->s_debts) { ext2_msg(sb, KERN_ERR, "error: not enough memory"); goto failed_mount_group_desc; } for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { for (j = 0; j < i; j++) brelse (sbi->s_group_desc[j]); ext2_msg(sb, KERN_ERR, "error: unable to read group descriptors"); goto failed_mount_group_desc; } } if (!ext2_check_descriptors (sb)) { 
        ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
        goto failed_mount2;
    }
    sbi->s_gdb_count = db_count;
    get_random_bytes(&sbi->s_next_generation, sizeof(u32));
    spin_lock_init(&sbi->s_next_gen_lock);

    /* per fileystem reservation list head & lock */
    spin_lock_init(&sbi->s_rsv_window_lock);
    sbi->s_rsv_window_root = RB_ROOT;
    /*
     * Add a single, static dummy reservation to the start of the
     * reservation window list --- it gives us a placeholder for
     * append-at-start-of-list which makes the allocation logic
     * _much_ simpler.
     */
    sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
    sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
    sbi->s_rsv_window_head.rsv_alloc_hit = 0;
    sbi->s_rsv_window_head.rsv_goal_size = 0;
    ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

    err = percpu_counter_init(&sbi->s_freeblocks_counter,
                ext2_count_free_blocks(sb), GFP_KERNEL);
    if (!err) {
        err = percpu_counter_init(&sbi->s_freeinodes_counter,
                ext2_count_free_inodes(sb), GFP_KERNEL);
    }
    if (!err) {
        err = percpu_counter_init(&sbi->s_dirs_counter,
                ext2_count_dirs(sb), GFP_KERNEL);
    }
    if (err) {
        ext2_msg(sb, KERN_ERR, "error: insufficient memory");
        goto failed_mount3;
    }

    /*
     * set up enough so that it can read an inode
     */
    sb->s_op = &ext2_sops;
    sb->s_export_op = &ext2_export_ops;
    sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
    sb->dq_op = &dquot_operations;
    sb->s_qcop = &dquot_quotactl_ops;
    sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

    root = ext2_iget(sb, EXT2_ROOT_INO);
    if (IS_ERR(root)) {
        ret = PTR_ERR(root);
        goto failed_mount3;
    }
    if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
        iput(root);
        ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
        goto failed_mount3;
    }

    sb->s_root = d_make_root(root);
    if (!sb->s_root) {
        ext2_msg(sb, KERN_ERR, "error: get root inode failed");
        ret = -ENOMEM;
        goto failed_mount3;
    }
    if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
        ext2_msg(sb, KERN_WARNING,
            "warning: mounting ext3 filesystem as ext2");
    if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
        sb->s_flags |= MS_RDONLY;
    ext2_write_super(sb);
    return 0;

cantfind_ext2:
    if (!silent)
        ext2_msg(sb, KERN_ERR,
            "error: can't find an ext2 filesystem on dev %s.",
            sb->s_id);
    goto failed_mount;
failed_mount3:
    percpu_counter_destroy(&sbi->s_freeblocks_counter);
    percpu_counter_destroy(&sbi->s_freeinodes_counter);
    percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
    for (i = 0; i < db_count; i++)
        brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
    kfree(sbi->s_group_desc);
    kfree(sbi->s_debts);
failed_mount:
    brelse(bh);
failed_sbi:
    sb->s_fs_info = NULL;
    kfree(sbi->s_blockgroup_lock);
    kfree(sbi);
failed:
    return ret;
}

/* Clear a stale write I/O error on the cached superblock buffer. */
static void ext2_clear_super_error(struct super_block *sb)
{
    struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;

    if (buffer_write_io_error(sbh)) {
        /*
         * Oh, dear.  A previous attempt to write the
         * superblock failed.  This could happen because the
         * USB device was yanked out.  Or it could happen to
         * be a transient write error and maybe the block will
         * be remapped.  Nothing we can do but to retry the
         * write and hope for the best.
         */
        ext2_msg(sb, KERN_ERR,
               "previous I/O error to superblock detected\n");
        clear_buffer_write_io_error(sbh);
        set_buffer_uptodate(sbh);
    }
}

/*
 * Refresh the free-block/inode counts and write time in the superblock
 * and write it out (synchronously when @wait is set).
 */
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
                int wait)
{
    ext2_clear_super_error(sb);
    spin_lock(&EXT2_SB(sb)->s_lock);
    es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
    es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
    es->s_wtime = cpu_to_le32(get_seconds());
    /* unlock before we do IO */
    spin_unlock(&EXT2_SB(sb)->s_lock);
    mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
    if (wait)
        sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
}

/*
 * In the second extended file system, it is not necessary to
 * write the super block since we use a mapping of the
 * disk super block in a buffer.
 *
 * However, this function is still used to set the fs valid
 * flags to 0.
We need to set this flag to 0 since the fs
 * may have been checked while mounted and e2fsck may have
 * set s_state to EXT2_VALID_FS after some corrections.
 */
static int ext2_sync_fs(struct super_block *sb, int wait)
{
    struct ext2_sb_info *sbi = EXT2_SB(sb);
    struct ext2_super_block *es = EXT2_SB(sb)->s_es;

    /*
     * Write quota structures to quota file, sync_blockdev() will write
     * them to disk later
     */
    dquot_writeback_dquots(sb, -1);

    spin_lock(&sbi->s_lock);
    if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
        ext2_debug("setting valid to 0\n");
        es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
    }
    spin_unlock(&sbi->s_lock);
    ext2_sync_super(sb, es, wait);
    return 0;
}

/* VFS ->freeze_fs: mark the fs valid on disk so fsck of a snapshot is clean. */
static int ext2_freeze(struct super_block *sb)
{
    struct ext2_sb_info *sbi = EXT2_SB(sb);

    /*
     * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared
     * because we have unattached inodes and thus filesystem is not fully
     * consistent.
     */
    if (atomic_long_read(&sb->s_remove_count)) {
        ext2_sync_fs(sb, 1);
        return 0;
    }
    /* Set EXT2_FS_VALID flag */
    spin_lock(&sbi->s_lock);
    sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);
    spin_unlock(&sbi->s_lock);
    ext2_sync_super(sb, sbi->s_es, 1);

    return 0;
}

static int ext2_unfreeze(struct super_block *sb)
{
    /* Just write sb to clear EXT2_VALID_FS flag */
    ext2_write_super(sb);

    return 0;
}

/* Flush superblock state to disk, but only on a writable mount. */
void ext2_write_super(struct super_block *sb)
{
    if (!(sb->s_flags & MS_RDONLY))
        ext2_sync_fs(sb, 1);
}

/*
 * VFS ->remount_fs: re-parse options and handle ro<->rw transitions,
 * restoring the previous options on any failure.
 */
static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
    struct ext2_sb_info * sbi = EXT2_SB(sb);
    struct ext2_super_block * es;
    struct ext2_mount_options old_opts;
    unsigned long old_sb_flags;
    int err;

    sync_filesystem(sb);
    spin_lock(&sbi->s_lock);

    /* Store the old options */
    old_sb_flags = sb->s_flags;
    old_opts.s_mount_opt = sbi->s_mount_opt;
    old_opts.s_resuid = sbi->s_resuid;
    old_opts.s_resgid = sbi->s_resgid;

    /*
     * Allow the "check" option to be passed as a remount option.
     */
    if (!parse_options(data, sb)) {
        err = -EINVAL;
        goto restore_opts;
    }

    sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
        ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);

    es = sbi->s_es;
    /* The DAX flag cannot be toggled while inodes are in use. */
    if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
        ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
             "dax flag with busy inodes while remounting");
        sbi->s_mount_opt ^= EXT2_MOUNT_DAX;
    }
    if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
        /* No ro/rw change requested: nothing more to do. */
        spin_unlock(&sbi->s_lock);
        return 0;
    }
    if (*flags & MS_RDONLY) {
        if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
            !(sbi->s_mount_state & EXT2_VALID_FS)) {
            spin_unlock(&sbi->s_lock);
            return 0;
        }

        /*
         * OK, we are remounting a valid rw partition rdonly, so set
         * the rdonly flag and then mark the partition as valid again.
         */
        es->s_state = cpu_to_le16(sbi->s_mount_state);
        es->s_mtime = cpu_to_le32(get_seconds());
        spin_unlock(&sbi->s_lock);

        err = dquot_suspend(sb, -1);
        if (err < 0) {
            spin_lock(&sbi->s_lock);
            goto restore_opts;
        }

        ext2_sync_super(sb, es, 1);
    } else {
        __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
                           ~EXT2_FEATURE_RO_COMPAT_SUPP);
        if (ret) {
            ext2_msg(sb, KERN_WARNING,
                "warning: couldn't remount RDWR because of "
                "unsupported optional features (%x).",
                le32_to_cpu(ret));
            err = -EROFS;
            goto restore_opts;
        }
        /*
         * Mounting a RDONLY partition read-write, so reread and
         * store the current valid flag.  (It may have been changed
         * by e2fsck since we originally mounted the partition.)
         */
        sbi->s_mount_state = le16_to_cpu(es->s_state);
        if (!ext2_setup_super (sb, es, 0))
            sb->s_flags &= ~MS_RDONLY;
        spin_unlock(&sbi->s_lock);

        ext2_write_super(sb);

        dquot_resume(sb, -1);
    }

    return 0;
restore_opts:
    /* Roll back everything touched before the failure. */
    sbi->s_mount_opt = old_opts.s_mount_opt;
    sbi->s_resuid = old_opts.s_resuid;
    sbi->s_resgid = old_opts.s_resgid;
    sb->s_flags = old_sb_flags;
    spin_unlock(&sbi->s_lock);
    return err;
}

/* VFS ->statfs: report capacity and free-space figures. */
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
    struct super_block *sb = dentry->d_sb;
    struct ext2_sb_info *sbi = EXT2_SB(sb);
    struct ext2_super_block *es = sbi->s_es;
    u64 fsid;

    spin_lock(&sbi->s_lock);

    if (test_opt (sb, MINIX_DF))
        sbi->s_overhead_last = 0;
    else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
        unsigned long i, overhead = 0;
        smp_rmb();

        /*
         * Compute the overhead (FS structures).  This is constant
         * for a given filesystem unless the number of block groups
         * changes so we cache the previous value until it does.
         */

        /*
         * All of the blocks before first_data_block are
         * overhead
         */
        overhead = le32_to_cpu(es->s_first_data_block);

        /*
         * Add the overhead attributed to the superblock and
         * block group descriptors.  If the sparse superblocks
         * feature is turned on, then not all groups have this.
         */
        for (i = 0; i < sbi->s_groups_count; i++)
            overhead += ext2_bg_has_super(sb, i) +
                ext2_bg_num_gdb(sb, i);

        /*
         * Every block group has an inode bitmap, a block
         * bitmap, and an inode table.
         */
        overhead += (sbi->s_groups_count *
                 (2 + sbi->s_itb_per_group));
        sbi->s_overhead_last = overhead;
        smp_wmb();
        sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
    }

    buf->f_type = EXT2_SUPER_MAGIC;
    buf->f_bsize = sb->s_blocksize;
    buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
    buf->f_bfree = ext2_count_free_blocks(sb);
    es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
    buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
    /* Reserved blocks may exceed the free count; clamp at zero. */
    if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
        buf->f_bavail = 0;
    buf->f_files = le32_to_cpu(es->s_inodes_count);
    buf->f_ffree = ext2_count_free_inodes(sb);
    es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
    buf->f_namelen = EXT2_NAME_LEN;
    /* Fold the 128-bit fs UUID into the 64-bit fsid. */
    fsid = le64_to_cpup((void *)es->s_uuid) ^
           le64_to_cpup((void *)es->s_uuid + sizeof(u64));
    buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
    buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
    spin_unlock(&sbi->s_lock);
    return 0;
}

static struct dentry *ext2_mount(struct file_system_type *fs_type,
    int flags, const char *dev_name, void *data)
{
    return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
                   size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
    int err = 0;
    int offset = off & (sb->s_blocksize - 1);
    int tocopy;
    size_t toread;
    struct buffer_head tmp_bh;
    struct buffer_head *bh;
    loff_t i_size = i_size_read(inode);

    if (off > i_size)
        return 0;
    if (off+len > i_size)
        len = i_size-off;
    toread = len;
    while (toread > 0) {
        tocopy = sb->s_blocksize - offset < toread ?
sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; tmp_bh.b_size = sb->s_blocksize; err = ext2_get_block(inode, blk, &tmp_bh, 0); if (err < 0) return err; if (!buffer_mapped(&tmp_bh)) /* A hole? */ memset(data, 0, tocopy); else { bh = sb_bread(sb, tmp_bh.b_blocknr); if (!bh) return -EIO; memcpy(data, bh->b_data+offset, tocopy); brelse(bh); } offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile */ static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t towrite = len; struct buffer_head tmp_bh; struct buffer_head *bh; while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; tmp_bh.b_size = sb->s_blocksize; err = ext2_get_block(inode, blk, &tmp_bh, 1); if (err < 0) goto out; if (offset || tocopy != EXT2_BLOCK_SIZE(sb)) bh = sb_bread(sb, tmp_bh.b_blocknr); else bh = sb_getblk(sb, tmp_bh.b_blocknr); if (unlikely(!bh)) { err = -EIO; goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, tocopy); flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); brelse(bh); offset = 0; towrite -= tocopy; data += tocopy; blk++; } out: if (len == towrite) return err; if (inode->i_size < off+len-towrite) i_size_write(inode, off+len-towrite); inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); return len - towrite; } #endif static struct file_system_type ext2_fs_type = { .owner = THIS_MODULE, .name = "ext2", .mount = ext2_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext2"); static int __init init_ext2_fs(void) { int err = init_ext2_xattr(); if (err) return err; err = init_inodecache(); if (err) goto out1; err = 
register_filesystem(&ext2_fs_type); /* completes "err = " from the previous line */
	if (err)
		goto out;
	return 0;
out:
	/* Unwind in reverse order of initialization. */
	destroy_inodecache();
out1:
	exit_ext2_xattr();
	return err;
}

/* Module unload: unregister the filesystem, then tear down the inode
 * cache and xattr state set up by init_ext2_fs(). */
static void __exit exit_ext2_fs(void)
{
	unregister_filesystem(&ext2_fs_type);
	destroy_inodecache();
	exit_ext2_xattr();
}

MODULE_AUTHOR("Remy Card and others");
MODULE_DESCRIPTION("Second Extended Filesystem");
MODULE_LICENSE("GPL");

module_init(init_ext2_fs)
module_exit(exit_ext2_fs)
./CrossVul/dataset_final_sorted/CWE-19/c/bad_1843_1
crossvul-cpp_data_good_5319_0
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                            RRRR    GGG   FFFFF                              %
%                            R   R  G      F                                  %
%                            RRRR   G  GG  FFF                                %
%                            R R    G   G  F                                  %
%                            R  R    GGG   F                                  %
%                                                                             %
%            Read/Write LEGO Mindstorms EV3 Robot Graphics File               %
%                                                                             %
%                              Software Design                                %
%                               Brian Wheeler                                 %
%                                August 2013                                  %
%                                                                             %
%  Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/module.h"
#include "magick/utility.h"

/*
  Forward declarations.
*/
static MagickBooleanType
  WriteRGFImage(const ImageInfo *,Image *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e a d R G F I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadRGFImage() reads an RGF bitmap image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadRGFImage method is:
%
%      Image *ReadRGFImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadRGFImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  int
    c;

  MagickBooleanType
    status;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  register unsigned char
    *p;

  size_t
    bit,
    byte;

  ssize_t
    y;

  unsigned char
    *data;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read RGF header: width and height, one byte each (the format is limited
    to 255x255).  Fail cleanly on a truncated header instead of letting
    EOF (-1) be cast into an enormous unsigned image dimension.
  */
  c=ReadBlobByte(image);
  if (c == EOF)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  image->columns=(unsigned long) c;
  c=ReadBlobByte(image);
  if (c == EOF)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  image->rows=(unsigned long) c;
  image->depth=8;
  image->storage_class=PseudoClass;
  image->colors=2;
  /*
    Initialize image structure.
  */
  if (AcquireImageColormap(image,image->colors) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Initialize colormap: index 0 is white, index 1 is black.
  */
  image->colormap[0].red=QuantumRange;
  image->colormap[0].green=QuantumRange;
  image->colormap[0].blue=QuantumRange;
  image->colormap[1].red=(Quantum) 0;
  image->colormap[1].green=(Quantum) 0;
  image->colormap[1].blue=(Quantum) 0;
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  /*
    Read image data.  Maximum size is 255*255 bytes so the multiplication
    below cannot overflow.
  */
  data=(unsigned char *) AcquireQuantumMemory(image->rows,image->columns*
    sizeof(*data));
  if (data == (unsigned char *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  p=data;
  for (i=0; i < (ssize_t) (image->columns*image->rows); i++)
  {
    /*
      ReadBlobByte() returns EOF (-1) past end-of-blob; the cast stores 0xff
      for such reads.  Short files therefore pad with 0xff rather than
      leaving uninitialized memory; the unpacking loop below only consumes
      one byte per eight pixels, so a valid prefix still decodes.
    */
    *p++=(unsigned char) ReadBlobByte(image);
  }
  /*
    Convert RGF image to pixel packets.  Bits are consumed LSB-first:
    bit 0 of each byte is the leftmost of its eight pixels.
  */
  p=data;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    bit=0;
    byte=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (bit == 0)
        byte=(size_t) (*p++);
      SetPixelIndex(indexes+x,(Quantum) ((byte & 0x01) != 0 ? 0x01 : 0x00));
      bit++;
      byte>>=1;
      if (bit == 8)
        bit=0;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
      image->rows);
    if (status == MagickFalse)
      break;
  }
  data=(unsigned char *) RelinquishMagickMemory(data);
  (void) SyncImage(image);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e g i s t e r R G F I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterRGFImage() adds attributes for the RGF image format to the list of
%  supported formats.  The attributes include the image format tag, a method
%  to read and/or write the format, whether the format supports the saving of
%  more than one frame to the same file or blob, whether the format supports
%  native in-memory I/O, and a brief description of the format.
%
%  The format of the RegisterRGFImage method is:
%
%      size_t RegisterRGFImage(void)
%
*/
ModuleExport size_t RegisterRGFImage(void)
{
  MagickInfo
    *entry;

  entry=SetMagickInfo("RGF");
  entry->decoder=(DecodeImageHandler *) ReadRGFImage;
  entry->encoder=(EncodeImageHandler *) WriteRGFImage;
  entry->adjoin=MagickFalse;  /* single-frame format */
  entry->description=ConstantString(
    "LEGO Mindstorms EV3 Robot Graphic Format (black and white)");
  entry->module=ConstantString("RGF");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U n r e g i s t e r R G F I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterRGFImage() removes format registrations made by the RGF module
%  from the list of supported formats.
%
%  The format of the UnregisterRGFImage method is:
%
%      UnregisterRGFImage(void)
%
*/
ModuleExport void UnregisterRGFImage(void)
{
  (void) UnregisterMagickInfo("RGF");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   W r i t e R G F I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteRGFImage() writes an image to a file in the X bitmap format.
%
%  The format of the WriteRGFImage method is:
%
%      MagickBooleanType WriteRGFImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteRGFImage(const ImageInfo *image_info,Image *image)
{
  MagickBooleanType
    status;

  int
    bit;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  unsigned char
    byte;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace);
  /*
    The RGF header stores each dimension in a single byte.
  */
  if ((image->columns > 255L) || (image->rows > 255L))
    ThrowWriterException(ImageError,"Dimensions must be less than 255x255");
  /*
    Write header (just the image dimensions).
  */
  (void) WriteBlobByte(image,image->columns & 0xff);
  (void) WriteBlobByte(image,image->rows & 0xff);
  /*
    Convert MIFF to bit pixels.  Each row is packed independently,
    LSB-first: the first pixel of a byte ends up in bit 0, matching the
    order ReadRGFImage() consumes bits.
  */
  (void) SetImageType(image,BilevelType);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    bit=0;
    byte=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte>>=1;
      if (GetPixelLuma(image,p) < (QuantumRange/2.0))
        byte|=0x80;  /* dark pixel => bit set */
      bit++;
      if (bit == 8)
        {
          /*
            Write a bitmap byte to the image file.
          */
          (void) WriteBlobByte(image,byte);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        /*
          Flush the trailing partial byte.  Shift the accumulated bits down
          into the low-order positions that the reader consumes first;
          without this shift a width that is not a multiple of 8 would not
          round-trip through ReadRGFImage().
        */
        byte>>=(8-bit);
        (void) WriteBlobByte(image,byte);
      }
    status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
      image->rows);
    if (status == MagickFalse)
      break;
  }
  (void) CloseBlob(image);
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-19/c/good_5319_0
crossvul-cpp_data_good_1453_3
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_attr.h" #include "xfs_attr_leaf.h" #include "xfs_attr_remote.h" #include "xfs_trans_space.h" #include "xfs_trace.h" #include "xfs_cksum.h" #include "xfs_buf_item.h" #include "xfs_error.h" #define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */ /* * Each contiguous block has a header, so it is not just a simple attribute * length to FSB conversion. */ int xfs_attr3_rmt_blocks( struct xfs_mount *mp, int attrlen) { if (xfs_sb_version_hascrc(&mp->m_sb)) { int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize); return (attrlen + buflen - 1) / buflen; } return XFS_B_TO_FSB(mp, attrlen); } /* * Checking of the remote attribute header is split into two parts. The verifier * does CRC, location and bounds checking, the unpacking function checks the * attribute parameters and owner. 
*/ static bool xfs_attr3_rmt_hdr_ok( struct xfs_mount *mp, void *ptr, xfs_ino_t ino, uint32_t offset, uint32_t size, xfs_daddr_t bno) { struct xfs_attr3_rmt_hdr *rmt = ptr; if (bno != be64_to_cpu(rmt->rm_blkno)) return false; if (offset != be32_to_cpu(rmt->rm_offset)) return false; if (size != be32_to_cpu(rmt->rm_bytes)) return false; if (ino != be64_to_cpu(rmt->rm_owner)) return false; /* ok */ return true; } static bool xfs_attr3_rmt_verify( struct xfs_mount *mp, void *ptr, int fsbsize, xfs_daddr_t bno) { struct xfs_attr3_rmt_hdr *rmt = ptr; if (!xfs_sb_version_hascrc(&mp->m_sb)) return false; if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC)) return false; if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid)) return false; if (be64_to_cpu(rmt->rm_blkno) != bno) return false; if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt)) return false; if (be32_to_cpu(rmt->rm_offset) + be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX) return false; if (rmt->rm_owner == 0) return false; return true; } static void xfs_attr3_rmt_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; char *ptr; int len; xfs_daddr_t bno; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; ptr = bp->b_addr; bno = bp->b_bn; len = BBTOB(bp->b_length); ASSERT(len >= XFS_LBSIZE(mp)); while (len > 0) { if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF)) { xfs_buf_ioerror(bp, EFSBADCRC); break; } if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) { xfs_buf_ioerror(bp, EFSCORRUPTED); break; } len -= XFS_LBSIZE(mp); ptr += XFS_LBSIZE(mp); bno += mp->m_bsize; } if (bp->b_error) xfs_verifier_error(bp); else ASSERT(len == 0); } static void xfs_attr3_rmt_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_buf_log_item *bip = bp->b_fspriv; char *ptr; int len; xfs_daddr_t bno; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) return; ptr = bp->b_addr; bno = 
bp->b_bn; len = BBTOB(bp->b_length); ASSERT(len >= XFS_LBSIZE(mp)); while (len > 0) { if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) { xfs_buf_ioerror(bp, EFSCORRUPTED); xfs_verifier_error(bp); return; } if (bip) { struct xfs_attr3_rmt_hdr *rmt; rmt = (struct xfs_attr3_rmt_hdr *)ptr; rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); } xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF); len -= XFS_LBSIZE(mp); ptr += XFS_LBSIZE(mp); bno += mp->m_bsize; } ASSERT(len == 0); } const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = { .verify_read = xfs_attr3_rmt_read_verify, .verify_write = xfs_attr3_rmt_write_verify, }; STATIC int xfs_attr3_rmt_hdr_set( struct xfs_mount *mp, void *ptr, xfs_ino_t ino, uint32_t offset, uint32_t size, xfs_daddr_t bno) { struct xfs_attr3_rmt_hdr *rmt = ptr; if (!xfs_sb_version_hascrc(&mp->m_sb)) return 0; rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC); rmt->rm_offset = cpu_to_be32(offset); rmt->rm_bytes = cpu_to_be32(size); uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid); rmt->rm_owner = cpu_to_be64(ino); rmt->rm_blkno = cpu_to_be64(bno); return sizeof(struct xfs_attr3_rmt_hdr); } /* * Helper functions to copy attribute data in and out of the one disk extents */ STATIC int xfs_attr_rmtval_copyout( struct xfs_mount *mp, struct xfs_buf *bp, xfs_ino_t ino, int *offset, int *valuelen, __uint8_t **dst) { char *src = bp->b_addr; xfs_daddr_t bno = bp->b_bn; int len = BBTOB(bp->b_length); ASSERT(len >= XFS_LBSIZE(mp)); while (len > 0 && *valuelen > 0) { int hdr_size = 0; int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp)); byte_cnt = min(*valuelen, byte_cnt); if (xfs_sb_version_hascrc(&mp->m_sb)) { if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset, byte_cnt, bno)) { xfs_alert(mp, "remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)", bno, *offset, byte_cnt, ino); return EFSCORRUPTED; } hdr_size = sizeof(struct xfs_attr3_rmt_hdr); } memcpy(*dst, src + hdr_size, byte_cnt); /* roll buffer forwards */ len -= 
XFS_LBSIZE(mp); /* completes "len -=" from the previous line */
		src += XFS_LBSIZE(mp);
		bno += mp->m_bsize;

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*dst += byte_cnt;
		*offset += byte_cnt;
	}
	return 0;
}

/*
 * Copy an in-memory attribute value into the remote-attribute buffer @bp,
 * one filesystem block at a time.  On CRC-enabled filesystems each block
 * gets a header via xfs_attr3_rmt_hdr_set(); the tail of the final block
 * is zero-filled.  *src, *offset and *valuelen are advanced as data is
 * consumed so the caller can continue with the next buffer.
 */
STATIC void
xfs_attr_rmtval_copyin(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	int		*offset,
	int		*valuelen,
	__uint8_t	**src)
{
	char		*dst = bp->b_addr;
	xfs_daddr_t	bno = bp->b_bn;
	int		len = BBTOB(bp->b_length);

	ASSERT(len >= XFS_LBSIZE(mp));

	while (len > 0 && *valuelen > 0) {
		int hdr_size;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));

		byte_cnt = min(*valuelen, byte_cnt);

		/* hdr_size is 0 on non-CRC filesystems */
		hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
						 byte_cnt, bno);

		memcpy(dst + hdr_size, *src, byte_cnt);

		/*
		 * If this is the last block, zero the remainder of it.
		 * Check that we are actually the last block, too.
		 */
		if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
			ASSERT(*valuelen - byte_cnt == 0);
			ASSERT(len == XFS_LBSIZE(mp));
			memset(dst + hdr_size + byte_cnt, 0,
			       XFS_LBSIZE(mp) - hdr_size - byte_cnt);
		}

		/* roll buffer forwards */
		len -= XFS_LBSIZE(mp);
		dst += XFS_LBSIZE(mp);
		bno += mp->m_bsize;

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*src += byte_cnt;
		*offset += byte_cnt;
	}
}

/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
*/ int xfs_attr_rmtval_get( struct xfs_da_args *args) { struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE]; struct xfs_mount *mp = args->dp->i_mount; struct xfs_buf *bp; xfs_dablk_t lblkno = args->rmtblkno; __uint8_t *dst = args->value; int valuelen; int nmap; int error; int blkcnt = args->rmtblkcnt; int i; int offset = 0; trace_xfs_attr_rmtval_get(args); ASSERT(!(args->flags & ATTR_KERNOVAL)); ASSERT(args->rmtvaluelen == args->valuelen); valuelen = args->rmtvaluelen; while (valuelen > 0) { nmap = ATTR_RMTVALUE_MAPSIZE; error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, blkcnt, map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return error; ASSERT(nmap >= 1); for (i = 0; (i < nmap) && (valuelen > 0); i++) { xfs_daddr_t dblkno; int dblkcnt; ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && (map[i].br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dblkno, dblkcnt, 0, &bp, &xfs_attr3_rmt_buf_ops); if (error) return error; error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino, &offset, &valuelen, &dst); xfs_buf_relse(bp); if (error) return error; /* roll attribute extent map forwards */ lblkno += map[i].br_blockcount; blkcnt -= map[i].br_blockcount; } } ASSERT(valuelen == 0); return 0; } /* * Write the value associated with an attribute into the out-of-line buffer * that we have defined for it. */ int xfs_attr_rmtval_set( struct xfs_da_args *args) { struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; struct xfs_bmbt_irec map; xfs_dablk_t lblkno; xfs_fileoff_t lfileoff = 0; __uint8_t *src = args->value; int blkcnt; int valuelen; int nmap; int error; int offset = 0; trace_xfs_attr_rmtval_set(args); /* * Find a "hole" in the attribute address space large enough for * us to drop the new attribute's value into. 
Because CRC enable * attributes have headers, we can't just do a straight byte to FSB * conversion and have to take the header space into account. */ blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen); error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, XFS_ATTR_FORK); if (error) return error; args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; args->rmtblkcnt = blkcnt; /* * Roll through the "value", allocating blocks on disk as required. */ while (blkcnt > 0) { int committed; /* * Allocate a single extent, up to the size of the value. */ xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, blkcnt, XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, args->firstblock, args->total, &map, &nmap, args->flist); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return(error); } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); ASSERT(nmap == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; /* * Start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) return (error); } /* * Roll through the "value", copying the attribute value to the * already-allocated blocks. Blocks are written synchronously * so that we can know they are all on disk before we turn off * the INCOMPLETE flag. 
*/ lblkno = args->rmtblkno; blkcnt = args->rmtblkcnt; valuelen = args->rmtvaluelen; while (valuelen > 0) { struct xfs_buf *bp; xfs_daddr_t dblkno; int dblkcnt; ASSERT(blkcnt > 0); xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno, blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return(error); ASSERT(nmap == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); if (!bp) return ENOMEM; bp->b_ops = &xfs_attr3_rmt_buf_ops; xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, &valuelen, &src); error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ xfs_buf_relse(bp); if (error) return error; /* roll attribute extent map forwards */ lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; } ASSERT(valuelen == 0); return 0; } /* * Remove the value associated with an attribute by deleting the * out-of-line buffer that it is stored on. */ int xfs_attr_rmtval_remove( struct xfs_da_args *args) { struct xfs_mount *mp = args->dp->i_mount; xfs_dablk_t lblkno; int blkcnt; int error; int done; trace_xfs_attr_rmtval_remove(args); /* * Roll through the "value", invalidating the attribute value's blocks. */ lblkno = args->rmtblkno; blkcnt = args->rmtblkcnt; while (blkcnt > 0) { struct xfs_bmbt_irec map; struct xfs_buf *bp; xfs_daddr_t dblkno; int dblkcnt; int nmap; /* * Try to remember where we decided to put the value. */ nmap = 1; error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return(error); ASSERT(nmap == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); /* * If the "remote" value is in the cache, remove it. 
*/ bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK); if (bp) { xfs_buf_stale(bp); xfs_buf_relse(bp); bp = NULL; } lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; } /* * Keep de-allocating extents until the remote-value region is gone. */ lblkno = args->rmtblkno; blkcnt = args->rmtblkcnt; done = 0; while (!done) { int committed; xfs_bmap_init(args->flist, args->firstblock); error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 1, args->firstblock, args->flist, &done); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return error; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, args->dp, 0); /* * Close out trans and start the next one in the chain. */ error = xfs_trans_roll(&args->trans, args->dp); if (error) return (error); } return(0); }
./CrossVul/dataset_final_sorted/CWE-19/c/good_1453_3
crossvul-cpp_data_good_1487_0
/* * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/module.h> #include <linux/init.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/net_namespace.h> #include <net/sock.h> static LIST_HEAD(nf_tables_expressions); /** * nft_register_afinfo - register nf_tables address family info * * @afi: address family info to register * * Register the address family for use with nf_tables. Returns zero on * success or a negative errno code otherwise. */ int nft_register_afinfo(struct net *net, struct nft_af_info *afi) { INIT_LIST_HEAD(&afi->tables); nfnl_lock(NFNL_SUBSYS_NFTABLES); list_add_tail_rcu(&afi->list, &net->nft.af_info); nfnl_unlock(NFNL_SUBSYS_NFTABLES); return 0; } EXPORT_SYMBOL_GPL(nft_register_afinfo); /** * nft_unregister_afinfo - unregister nf_tables address family info * * @afi: address family info to unregister * * Unregister the address family for use with nf_tables. 
*/
void nft_unregister_afinfo(struct nft_af_info *afi)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&afi->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_afinfo);

/* Find the per-netns afinfo for @family; caller holds the nfnl lock. */
static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
{
	struct nft_af_info *afi;

	list_for_each_entry(afi, &net->nft.af_info, list) {
		if (afi->family == family)
			return afi;
	}
	return NULL;
}

/*
 * Like nft_afinfo_lookup(), but when @autoload is true it may drop the nfnl
 * lock to request_module() the missing family.  Returns ERR_PTR(-EAGAIN)
 * after a successful autoload so the caller replays the operation under the
 * re-acquired lock, or ERR_PTR(-EAFNOSUPPORT) if the family is unavailable.
 */
static struct nft_af_info *
nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
{
	struct nft_af_info *afi;

	afi = nft_afinfo_lookup(net, family);
	if (afi != NULL)
		return afi;
#ifdef CONFIG_MODULES
	if (autoload) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-afinfo-%u", family);
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		afi = nft_afinfo_lookup(net, family);
		if (afi != NULL)
			return ERR_PTR(-EAGAIN);
	}
#endif
	return ERR_PTR(-EAFNOSUPPORT);
}

/* Populate a nft_ctx from the netlink request currently being processed. */
static void nft_ctx_init(struct nft_ctx *ctx,
			 const struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct nft_af_info *afi,
			 struct nft_table *table,
			 struct nft_chain *chain,
			 const struct nlattr * const *nla)
{
	ctx->net = sock_net(skb->sk);
	ctx->afi = afi;
	ctx->table = table;
	ctx->chain = chain;
	ctx->nla = nla;
	ctx->portid = NETLINK_CB(skb).portid;
	ctx->report = nlmsg_report(nlh);
	ctx->seq = nlh->nlmsg_seq;
}

/*
 * Allocate a transaction record with @size extra bytes of type-specific
 * payload; the current ctx is copied into the record for use at commit or
 * abort time.
 */
static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
					 u32 size)
{
	struct nft_trans *trans;

	trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
	if (trans == NULL)
		return NULL;

	trans->msg_type = msg_type;
	trans->ctx = *ctx;

	return trans;
}

static void nft_trans_destroy(struct nft_trans *trans)
{
	list_del(&trans->list);
	kfree(trans);
}

/*
 * Unregister the netfilter hooks of a base chain; dormant tables never had
 * their hooks registered, so they are skipped.
 */
static void nf_tables_unregister_hooks(const struct nft_table *table,
				       const struct nft_chain *chain,
				       unsigned int hook_nops)
{
	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
	    chain->flags & NFT_BASE_CHAIN)
		nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops);
}

/* Internal table flags */
#define NFT_TABLE_INACTIVE (1 << 15)

static int
nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
{
	struct nft_trans *trans;

	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
	if (trans == NULL)
		return -ENOMEM;

	/*
	 * A newly added table stays invisible (NFT_TABLE_INACTIVE) until the
	 * transaction batch commits.
	 */
	if (msg_type == NFT_MSG_NEWTABLE)
		ctx->table->flags |= NFT_TABLE_INACTIVE;

	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
}

/* Queue deletion of the current table and unlink it from the af list. */
static int nft_deltable(struct nft_ctx *ctx)
{
	int err;

	err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE);
	if (err < 0)
		return err;

	list_del_rcu(&ctx->table->list);
	return err;
}

static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
{
	struct nft_trans *trans;

	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
	if (trans == NULL)
		return -ENOMEM;

	/* New chains are hidden until commit, mirroring table handling. */
	if (msg_type == NFT_MSG_NEWCHAIN)
		ctx->chain->flags |= NFT_CHAIN_INACTIVE;

	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
}

/* Queue deletion of the current chain; drops the table's use count. */
static int nft_delchain(struct nft_ctx *ctx)
{
	int err;

	err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
	if (err < 0)
		return err;

	ctx->table->use--;
	list_del_rcu(&ctx->chain->list);
	return err;
}

/* A rule is active in the current generation iff its genmask bit is clear. */
static inline bool nft_rule_is_active(struct net *net,
				      const struct nft_rule *rule)
{
	return (rule->genmask & (1 << net->nft.gencursor)) == 0;
}

/* The generation cursor only ever toggles between 0 and 1. */
static inline int gencursor_next(struct net *net)
{
	return net->nft.gencursor+1 == 1 ?
1 : 0; } static inline int nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) { return (rule->genmask & (1 << gencursor_next(net))) == 0; } static inline void nft_rule_activate_next(struct net *net, struct nft_rule *rule) { /* Now inactive, will be active in the future */ rule->genmask = (1 << net->nft.gencursor); } static inline void nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) { rule->genmask = (1 << gencursor_next(net)); } static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) { rule->genmask = 0; } static int nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) { /* You cannot delete the same rule twice */ if (nft_rule_is_active_next(ctx->net, rule)) { nft_rule_deactivate_next(ctx->net, rule); ctx->chain->use--; return 0; } return -ENOENT; } static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, struct nft_rule *rule) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); if (trans == NULL) return NULL; nft_trans_rule(trans) = rule; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return trans; } static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_trans *trans; int err; trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule); if (trans == NULL) return -ENOMEM; err = nf_tables_delrule_deactivate(ctx, rule); if (err < 0) { nft_trans_destroy(trans); return err; } return 0; } static int nft_delrule_by_chain(struct nft_ctx *ctx) { struct nft_rule *rule; int err; list_for_each_entry(rule, &ctx->chain->rules, list) { err = nft_delrule(ctx, rule); if (err < 0) return err; } return 0; } /* Internal set flag */ #define NFT_SET_INACTIVE (1 << 15) static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, struct nft_set *set) { struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); if (trans == NULL) return -ENOMEM; if (msg_type == NFT_MSG_NEWSET && 
	    ctx->nla[NFTA_SET_ID] != NULL) {
		nft_trans_set_id(trans) =
			ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
		set->flags |= NFT_SET_INACTIVE;
	}
	nft_trans_set(trans) = set;
	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
}

/* Queue deletion of @set: unlink it (RCU) and drop the table use count. */
static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
{
	int err;

	err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set);
	if (err < 0)
		return err;

	list_del_rcu(&set->list);
	ctx->table->use--;
	return err;
}

/*
 * Tables
 */

/* Find a table by name attribute within one address family, or NULL. */
static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
					  const struct nlattr *nla)
{
	struct nft_table *table;

	list_for_each_entry(table, &afi->tables, list) {
		if (!nla_strcmp(nla, table->name))
			return table;
	}
	return NULL;
}

/* Like nft_table_lookup() but returns ERR_PTR(-EINVAL/-ENOENT). */
static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
						const struct nlattr *nla)
{
	struct nft_table *table;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	table = nft_table_lookup(afi, nla);
	if (table != NULL)
		return table;

	return ERR_PTR(-ENOENT);
}

/* Hand out the next object handle for this table (monotonic counter). */
static inline u64 nf_tables_alloc_handle(struct nft_table *table)
{
	return ++table->hgenerator;
}

/* Registered chain types, indexed by family and chain type. */
static const struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];

/* Find a registered chain type by name within @family, or NULL. */
static const struct nf_chain_type *
__nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
{
	int i;

	for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
		if (chain_type[family][i] != NULL &&
		    !nla_strcmp(nla, chain_type[family][i]->name))
			return chain_type[family][i];
	}
	return NULL;
}

/* Chain type lookup with optional module autoload.  The nfnl mutex is
 * dropped around request_module(); -EAGAIN tells the caller to restart
 * the operation because the lock was released.
 */
static const struct nf_chain_type *
nf_tables_chain_type_lookup(const struct nft_af_info *afi,
			    const struct nlattr *nla,
			    bool autoload)
{
	const struct nf_chain_type *type;

	type = __nf_tables_chain_type_lookup(afi->family, nla);
	if (type != NULL)
		return type;
#ifdef CONFIG_MODULES
	if (autoload) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-chain-%u-%.*s", afi->family,
			       nla_len(nla), (const char *)nla_data(nla));
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		type = __nf_tables_chain_type_lookup(afi->family, nla);
		if (type != NULL)
			return ERR_PTR(-EAGAIN);
	}
#endif
	return ERR_PTR(-ENOENT);
}

static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
	[NFTA_TABLE_NAME]	= { .type = NLA_STRING },
	[NFTA_TABLE_FLAGS]	= { .type = NLA_U32 },
};

/* Serialize one table into a netlink message.  Returns the message
 * length or -1 if the skb ran out of room (message is trimmed).
 */
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
				     u32 portid, u32 seq, int event, u32 flags,
				     int family, const struct nft_table *table)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
	    nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
	    nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a table event to NFNLGRP_NFTABLES listeners (if any). */
static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
					event, 0, ctx->afi->family, ctx->table);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
				  err);
	}
	return err;
}

/* Netlink dump callback: walk all families/tables under RCU, resuming
 * at cb->args[0].
 */
static int nf_tables_dump_tables(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family
		    != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			if (idx < s_idx)
				goto cont;
			/* New entry after the resume point: reset nested
			 * iteration state stored in args[1..].
			 */
			if (idx > s_idx)
				memset(&cb->args[1], 0,
				       sizeof(cb->args) - sizeof(cb->args[0]));
			if (nf_tables_fill_table_info(skb, net,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      NFT_MSG_NEWTABLE,
						      NLM_F_MULTI,
						      afi->family, table) < 0)
				goto done;

			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
done:
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

/* GETTABLE handler: either start a dump or unicast a single table. */
static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	struct sk_buff *skb2;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_tables,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	/* Tables still pending commit are not visible to GET. */
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		return -ENOMEM;

	err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid,
					nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
					family, table);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/* Register the netfilter hooks of every base chain in @table (used when
 * a dormant table is woken up).  On failure, unregister only the @i
 * chains that were registered before the failing one.
 */
static int nf_tables_table_enable(const struct nft_af_info *afi,
				  struct nft_table *table)
{
	struct nft_chain *chain;
	int err, i = 0;

	list_for_each_entry(chain, &table->chains, list) {
		if (!(chain->flags & NFT_BASE_CHAIN))
			continue;

		err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
		if (err < 0)
			goto err;

		i++;
	}
	return 0;
err:
	list_for_each_entry(chain, &table->chains, list) {
		if (!(chain->flags & NFT_BASE_CHAIN))
			continue;

		/* Stop after undoing the i successfully registered chains. */
		if (i-- <= 0)
			break;

		nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
	}
	return err;
}

/* Unregister the hooks of every base chain in @table (table goes dormant). */
static void nf_tables_table_disable(const struct nft_af_info *afi,
				    struct nft_table *table)
{
	struct nft_chain *chain;

	list_for_each_entry(chain, &table->chains, list) {
		if (chain->flags & NFT_BASE_CHAIN)
			nf_unregister_hooks(nft_base_chain(chain)->ops,
					    afi->nops);
	}
}

/* Handle a NEWTABLE on an existing table: only the DORMANT flag may
 * change.  Waking up (hook registration) happens immediately; going
 * dormant is deferred to commit via nft_trans_table_enable() == false.
 */
static int nf_tables_updtable(struct nft_ctx *ctx)
{
	struct nft_trans *trans;
	u32 flags;
	int ret = 0;

	if (!ctx->nla[NFTA_TABLE_FLAGS])
		return 0;

	flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
	if (flags & ~NFT_TABLE_F_DORMANT)
		return -EINVAL;

	if (flags == ctx->table->flags)
		return 0;

	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
				sizeof(struct nft_trans_table));
	if (trans == NULL)
		return -ENOMEM;

	if ((flags & NFT_TABLE_F_DORMANT) &&
	    !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
		nft_trans_table_enable(trans) = false;
	} else if (!(flags & NFT_TABLE_F_DORMANT) &&
		   ctx->table->flags & NFT_TABLE_F_DORMANT) {
		ret = nf_tables_table_enable(ctx->afi, ctx->table);
		if (ret >= 0) {
			ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
			nft_trans_table_enable(trans) = true;
		}
	}
	if (ret < 0)
		goto err;

	nft_trans_table_update(trans) = true;
	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;
err:
	nft_trans_destroy(trans);
	return ret;
}

/* NEWTABLE handler: update an existing table or create a new one,
 * pinning the family module for the table's lifetime.
 */
static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nlattr *name;
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	u32 flags = 0;
	struct nft_ctx ctx;
	int err;

	afi = nf_tables_afinfo_lookup(net, family, true);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	name = nla[NFTA_TABLE_NAME];
	table = nf_tables_table_lookup(afi, name);
	if (IS_ERR(table)) {
		if (PTR_ERR(table) != -ENOENT)
			return PTR_ERR(table);
		table = NULL;
	}

	if (table != NULL) {
		if (table->flags & NFT_TABLE_INACTIVE)
			return -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
		return nf_tables_updtable(&ctx);
	}

	if (nla[NFTA_TABLE_FLAGS]) {
		flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
		if (flags & ~NFT_TABLE_F_DORMANT)
			return -EINVAL;
	}

	/* Pin the family module while the table exists. */
	if (!try_module_get(afi->owner))
		return -EAFNOSUPPORT;

	table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
	if (table == NULL) {
		module_put(afi->owner);
		return -ENOMEM;
	}

	nla_strlcpy(table->name, name, nla_len(name));
	INIT_LIST_HEAD(&table->chains);
	INIT_LIST_HEAD(&table->sets);
	table->flags = flags;

	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
	err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
	if (err < 0) {
		kfree(table);
		module_put(afi->owner);
		return err;
	}
	list_add_tail_rcu(&table->list, &afi->tables);
	return 0;
}

/* Queue deletion of everything inside ctx->table and then the table
 * itself: rules first (deactivated in place), then unbound sets, then
 * chains.  The rule loop is not _safe because rules are only flagged,
 * not unlinked, at this point.
 */
static int nft_flush_table(struct nft_ctx *ctx)
{
	int err;
	struct nft_chain *chain, *nc;
	struct nft_set *set, *ns;

	list_for_each_entry(chain, &ctx->table->chains, list) {
		ctx->chain = chain;

		err = nft_delrule_by_chain(ctx);
		if (err < 0)
			goto out;
	}

	list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
		/* Anonymous sets with active bindings die with their rule. */
		if (set->flags & NFT_SET_ANONYMOUS &&
		    !list_empty(&set->bindings))
			continue;

		err = nft_delset(ctx, set);
		if (err < 0)
			goto out;
	}

	list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
		ctx->chain = chain;

		err = nft_delchain(ctx);
		if (err < 0)
			goto out;
	}

	err = nft_deltable(ctx);
out:
	return err;
}

/* Flush every table matching @family (and optionally the NAME attr). */
static int nft_flush(struct nft_ctx *ctx, int family)
{
	struct nft_af_info *afi;
	struct nft_table *table, *nt;
	const struct nlattr * const *nla = ctx->nla;
	int err = 0;

	list_for_each_entry(afi, &ctx->net->nft.af_info, list) {
		if (family != AF_UNSPEC && afi->family != family)
			continue;

		ctx->afi = afi;
		list_for_each_entry_safe(table, nt, &afi->tables, list) {
			if (nla[NFTA_TABLE_NAME] &&
			    nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
				continue;

			ctx->table = table;

			err = nft_flush_table(ctx);
			if (err < 0)
				goto out;
		}
	}
out:
	return err;
}

/* DELTABLE handler.  Without a family or table name this is a flush of
 * everything matching; otherwise delete one specific table.
 */
static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	struct nft_ctx ctx;

	nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla);
	if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
		return nft_flush(&ctx, family);

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	ctx.afi = afi;
	ctx.table = table;

	return nft_flush_table(&ctx);
}

/* Final release of a table; pairs the module_put with newtable's get. */
static void nf_tables_table_destroy(struct nft_ctx *ctx)
{
	BUG_ON(ctx->table->use > 0);

	kfree(ctx->table);
	module_put(ctx->afi->owner);
}

int nft_register_chain_type(const struct nf_chain_type *ctype)
{
	int err = 0;

	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	if (chain_type[ctype->family][ctype->type] != NULL) {
		err = -EBUSY;
		goto out;
	}
	chain_type[ctype->family][ctype->type] = ctype;
out:
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return err;
}
EXPORT_SYMBOL_GPL(nft_register_chain_type);

void nft_unregister_chain_type(const struct nf_chain_type *ctype)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	chain_type[ctype->family][ctype->type] = NULL;
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_chain_type);

/*
 * Chains
 */

/* Find a chain in @table by its 64-bit handle.  Linear scan. */
static struct nft_chain *
nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
{
	struct nft_chain *chain;

	list_for_each_entry(chain, &table->chains, list) {
		if (chain->handle == handle)
			return chain;
	}

	return ERR_PTR(-ENOENT);
}

/* Find a chain in @table by its name attribute. */
static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
						const struct nlattr *nla)
{
	struct nft_chain *chain;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);
	list_for_each_entry(chain, &table->chains, list) {
		if (!nla_strcmp(nla, chain->name))
			return chain;
	}

	return ERR_PTR(-ENOENT);
}

static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
	[NFTA_CHAIN_TABLE]	= { .type = NLA_STRING },
	[NFTA_CHAIN_HANDLE]	= { .type = NLA_U64 },
	[NFTA_CHAIN_NAME]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
	[NFTA_CHAIN_HOOK]	= { .type = NLA_NESTED },
	[NFTA_CHAIN_POLICY]	= { .type = NLA_U32 },
	[NFTA_CHAIN_TYPE]	= { .type = NLA_STRING },
	[NFTA_CHAIN_COUNTERS]	= { .type = NLA_NESTED },
};

static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
	[NFTA_HOOK_HOOKNUM]	= { .type = NLA_U32 },
	[NFTA_HOOK_PRIORITY]	= { .type = NLA_U32 },
};

/* Sum the per-cpu packet/byte counters (seqcount-consistent per cpu)
 * and emit them as a nested COUNTERS attribute.
 */
static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
{
	struct nft_stats *cpu_stats, total;
	struct nlattr *nest;
	unsigned int seq;
	u64 pkts, bytes;
	int cpu;

	memset(&total, 0, sizeof(total));
	for_each_possible_cpu(cpu) {
		cpu_stats = per_cpu_ptr(stats, cpu);
		do {
			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			pkts = cpu_stats->pkts;
			bytes = cpu_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
		total.pkts += pkts;
		total.bytes += bytes;
	}
	nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	return -ENOSPC;
}

/* Serialize one chain into a netlink message; base chains additionally
 * carry hook, policy, type and counter attributes.
 */
static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
				     u32 portid, u32 seq, int event, u32 flags,
				     int family, const struct nft_table *table,
				     const struct nft_chain *chain)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
		goto nla_put_failure;
	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
		goto nla_put_failure;

	if (chain->flags & NFT_BASE_CHAIN) {
		const struct nft_base_chain *basechain = nft_base_chain(chain);
		const struct nf_hook_ops *ops = &basechain->ops[0];
		struct nlattr *nest;

		nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
		if (nest == NULL)
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
				 htonl(basechain->policy)))
			goto nla_put_failure;

		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
			goto nla_put_failure;

		if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
			goto nla_put_failure;
	}

	if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a chain event to NFNLGRP_NFTABLES listeners (if any). */
static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
					event, 0, ctx->afi->family, ctx->table,
					ctx->chain);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
				  err);
	}
	return err;
}

/* Netlink dump callback: walk all families/tables/chains under RCU. */
static int nf_tables_dump_chains(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table
	*table;
	const struct nft_chain *chain;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			list_for_each_entry_rcu(chain, &table->chains, list) {
				if (idx < s_idx)
					goto cont;
				if (idx > s_idx)
					memset(&cb->args[1], 0,
					       sizeof(cb->args) - sizeof(cb->args[0]));
				if (nf_tables_fill_chain_info(skb, net,
							      NETLINK_CB(cb->skb).portid,
							      cb->nlh->nlmsg_seq,
							      NFT_MSG_NEWCHAIN,
							      NLM_F_MULTI,
							      afi->family, table, chain) < 0)
					goto done;

				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
				idx++;
			}
		}
	}
done:
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

/* GETCHAIN handler: either start a dump or unicast a single chain. */
static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	struct sk_buff *skb2;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_chains,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);
	/* Chains still pending commit are not visible to GET. */
	if (chain->flags & NFT_CHAIN_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb2)
		return -ENOMEM;

	err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid,
					nlh->nlmsg_seq, NFT_MSG_NEWCHAIN,
					0, family, table, chain);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
	[NFTA_COUNTER_PACKETS]	= { .type = NLA_U64 },
	[NFTA_COUNTER_BYTES]	= { .type = NLA_U64 },
};

/* Allocate per-cpu chain stats from a nested COUNTERS attribute; the
 * supplied totals are seeded into the current cpu's slot.
 */
static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
{
	struct nlattr *tb[NFTA_COUNTER_MAX+1];
	struct nft_stats __percpu *newstats;
	struct nft_stats *stats;
	int err;

	err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
	if (err < 0)
		return ERR_PTR(err);

	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
		return ERR_PTR(-EINVAL);

	newstats = netdev_alloc_pcpu_stats(struct nft_stats);
	if (newstats == NULL)
		return ERR_PTR(-ENOMEM);

	/* Restore old counters on this cpu, no problem. Per-cpu statistics
	 * are not exposed to userspace.
	 */
	stats = this_cpu_ptr(newstats);
	stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
	stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));

	return newstats;
}

/* Swap in new per-cpu stats, freeing the old block after a grace period. */
static void nft_chain_stats_replace(struct nft_base_chain *chain,
				    struct nft_stats __percpu *newstats)
{
	if (newstats == NULL)
		return;

	if (chain->stats) {
		struct nft_stats __percpu *oldstats =
				nft_dereference(chain->stats);

		rcu_assign_pointer(chain->stats, newstats);
		synchronize_rcu();
		free_percpu(oldstats);
	} else
		rcu_assign_pointer(chain->stats, newstats);
}

/* Final release of a chain; base chains also drop the type module ref
 * and their per-cpu stats.
 */
static void nf_tables_chain_destroy(struct nft_chain *chain)
{
	BUG_ON(chain->use > 0);

	if (chain->flags & NFT_BASE_CHAIN) {
		module_put(nft_base_chain(chain)->type->owner);
		free_percpu(nft_base_chain(chain)->stats);
		kfree(nft_base_chain(chain));
	} else {
		kfree(chain);
	}
}

/* NEWCHAIN handler: update an existing chain (rename/policy/stats) or
 * create a new one, registering netfilter hooks for base chains.
 */
static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nlattr * uninitialized_var(name);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct nft_chain *chain;
	struct nft_base_chain *basechain =
	NULL;
	struct nlattr *ha[NFTA_HOOK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	u8 policy = NF_ACCEPT;
	u64 handle = 0;
	unsigned int i;
	struct nft_stats __percpu *stats;
	int err;
	bool create;
	struct nft_ctx ctx;

	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;

	afi = nf_tables_afinfo_lookup(net, family, true);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);

	chain = NULL;
	name = nla[NFTA_CHAIN_NAME];

	if (nla[NFTA_CHAIN_HANDLE]) {
		handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
		chain = nf_tables_chain_lookup_byhandle(table, handle);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	} else {
		chain = nf_tables_chain_lookup(table, name);
		if (IS_ERR(chain)) {
			if (PTR_ERR(chain) != -ENOENT)
				return PTR_ERR(chain);
			chain = NULL;
		}
	}

	if (nla[NFTA_CHAIN_POLICY]) {
		/* Policies only make sense on base chains. */
		if ((chain != NULL &&
		    !(chain->flags & NFT_BASE_CHAIN)) ||
		    nla[NFTA_CHAIN_HOOK] == NULL)
			return -EOPNOTSUPP;

		policy = ntohl(nla_get_be32(nla[NFTA_CHAIN_POLICY]));
		switch (policy) {
		case NF_DROP:
		case NF_ACCEPT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (chain != NULL) {
		/* Update path: rename and/or replace counters via a
		 * NEWCHAIN transaction.
		 * NOTE(review): this inner "stats" shadows the outer
		 * __percpu-annotated pointer and is declared without
		 * __percpu, although nft_stats_alloc() returns a
		 * __percpu pointer — confirm against sparse annotations.
		 */
		struct nft_stats *stats = NULL;
		struct nft_trans *trans;

		if (chain->flags & NFT_CHAIN_INACTIVE)
			return -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		/* Renaming onto an existing chain name is rejected. */
		if (nla[NFTA_CHAIN_HANDLE] && name &&
		    !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
			return -EEXIST;

		if (nla[NFTA_CHAIN_COUNTERS]) {
			if (!(chain->flags & NFT_BASE_CHAIN))
				return -EOPNOTSUPP;

			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
			if (IS_ERR(stats))
				return PTR_ERR(stats);
		}

		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
					sizeof(struct nft_trans_chain));
		if (trans == NULL)
			return -ENOMEM;

		nft_trans_chain_stats(trans) = stats;
		nft_trans_chain_update(trans) = true;

		if (nla[NFTA_CHAIN_POLICY])
			nft_trans_chain_policy(trans) = policy;
		else
			nft_trans_chain_policy(trans) = -1;

		if (nla[NFTA_CHAIN_HANDLE] && name) {
			nla_strlcpy(nft_trans_chain_name(trans), name,
				    NFT_CHAIN_MAXNAMELEN);
		}
		list_add_tail(&trans->list, &net->nft.commit_list);
		return 0;
	}

	if (table->use == UINT_MAX)
		return -EOVERFLOW;

	if (nla[NFTA_CHAIN_HOOK]) {
		/* Creation of a base chain: resolve chain type, parse the
		 * nested hook spec, pin the type module and set up one
		 * nf_hook_ops per AF hook instance (afi->nops).
		 */
		const struct nf_chain_type *type;
		struct nf_hook_ops *ops;
		nf_hookfn *hookfn;
		u32 hooknum, priority;

		type = chain_type[family][NFT_CHAIN_T_DEFAULT];
		if (nla[NFTA_CHAIN_TYPE]) {
			type = nf_tables_chain_type_lookup(afi,
							   nla[NFTA_CHAIN_TYPE],
							   create);
			if (IS_ERR(type))
				return PTR_ERR(type);
		}

		err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
				       nft_hook_policy);
		if (err < 0)
			return err;
		if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
		    ha[NFTA_HOOK_PRIORITY] == NULL)
			return -EINVAL;

		hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
		if (hooknum >= afi->nhooks)
			return -EINVAL;
		priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));

		if (!(type->hook_mask & (1 << hooknum)))
			return -EOPNOTSUPP;
		if (!try_module_get(type->owner))
			return -ENOENT;
		hookfn = type->hooks[hooknum];

		basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
		if (basechain == NULL)
			return -ENOMEM;

		if (nla[NFTA_CHAIN_COUNTERS]) {
			stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
			if (IS_ERR(stats)) {
				module_put(type->owner);
				kfree(basechain);
				return PTR_ERR(stats);
			}
			basechain->stats = stats;
		} else {
			stats = netdev_alloc_pcpu_stats(struct nft_stats);
			if (stats == NULL) {
				module_put(type->owner);
				kfree(basechain);
				return -ENOMEM;
			}
			rcu_assign_pointer(basechain->stats, stats);
		}

		basechain->type = type;
		chain = &basechain->chain;

		for (i = 0; i < afi->nops; i++) {
			ops = &basechain->ops[i];
			ops->pf		= family;
			ops->owner	= afi->owner;
			ops->hooknum	= hooknum;
			ops->priority	= priority;
			ops->priv	= chain;
			ops->hook	= afi->hooks[ops->hooknum];
			if (hookfn)
				ops->hook = hookfn;
			if (afi->hook_ops_init)
				afi->hook_ops_init(ops, i);
		}

		chain->flags |= NFT_BASE_CHAIN;
		basechain->policy = policy;
	} else {
		chain =
		kzalloc(sizeof(*chain), GFP_KERNEL);
		if (chain == NULL)
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&chain->rules);
	chain->handle = nf_tables_alloc_handle(table);
	chain->net = net;
	chain->table = table;
	nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);

	/* Hooks are registered immediately unless the table is dormant. */
	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
	    chain->flags & NFT_BASE_CHAIN) {
		err = nf_register_hooks(nft_base_chain(chain)->ops,
					afi->nops);
		if (err < 0)
			goto err1;
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
	err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
	if (err < 0)
		goto err2;

	table->use++;
	list_add_tail_rcu(&chain->list, &table->chains);
	return 0;
err2:
	nf_tables_unregister_hooks(table, chain, afi->nops);
err1:
	nf_tables_chain_destroy(chain);
	return err;
}

/* DELCHAIN handler: only empty (use == 0), active chains may go. */
static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct nft_chain *chain;
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;
	struct nft_ctx ctx;

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
	if (IS_ERR(chain))
		return PTR_ERR(chain);
	if (chain->flags & NFT_CHAIN_INACTIVE)
		return -ENOENT;
	if (chain->use > 0)
		return -EBUSY;

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	return nft_delchain(&ctx);
}

/*
 * Expressions
 */

/**
 * 	nft_register_expr - register nf_tables expr type
 * 	@ops: expr type
 *
 * 	Registers the expr type for use with nf_tables. Returns zero on
 * 	success or a negative errno code otherwise.
 */
int nft_register_expr(struct nft_expr_type *type)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	/* Family-specific types go to the head so they are matched before
	 * family-independent (NFPROTO_UNSPEC) ones of the same name.
	 */
	if (type->family == NFPROTO_UNSPEC)
		list_add_tail_rcu(&type->list, &nf_tables_expressions);
	else
		list_add_rcu(&type->list, &nf_tables_expressions);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return 0;
}
EXPORT_SYMBOL_GPL(nft_register_expr);

/**
 * 	nft_unregister_expr - unregister nf_tables expr type
 * 	@ops: expr type
 *
 * 	Unregisters the expr typefor use with nf_tables.
 */
void nft_unregister_expr(struct nft_expr_type *type)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&type->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);

/* Find an expression type by name; family-specific entries match only
 * their own family, family-less entries match any.
 */
static const struct nft_expr_type *__nft_expr_type_get(u8 family,
						       struct nlattr *nla)
{
	const struct nft_expr_type *type;

	list_for_each_entry(type, &nf_tables_expressions, list) {
		if (!nla_strcmp(nla, type->name) &&
		    (!type->family || type->family == family))
			return type;
	}
	return NULL;
}

/* Expression type lookup with module autoload; takes a module reference
 * on success.  -EAGAIN signals that the nfnl lock was dropped for
 * request_module() and the caller must restart.
 */
static const struct nft_expr_type *nft_expr_type_get(u8 family,
						     struct nlattr *nla)
{
	const struct nft_expr_type *type;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	type = __nft_expr_type_get(family, nla);
	if (type != NULL && try_module_get(type->owner))
		return type;

#ifdef CONFIG_MODULES
	if (type == NULL) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-expr-%u-%.*s", family,
			       nla_len(nla), (char *)nla_data(nla));
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		if (__nft_expr_type_get(family, nla))
			return ERR_PTR(-EAGAIN);

		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-expr-%.*s",
			       nla_len(nla), (char *)nla_data(nla));
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		if (__nft_expr_type_get(family, nla))
			return ERR_PTR(-EAGAIN);
	}
#endif
	return ERR_PTR(-ENOENT);
}

static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
	[NFTA_EXPR_NAME]	= { .type = NLA_STRING },
	[NFTA_EXPR_DATA]	= { .type = NLA_NESTED },
};

/* Serialize one expression (name + optional nested data dump). */
static int nf_tables_fill_expr_info(struct sk_buff *skb,
				    const struct nft_expr *expr)
{
	if (nla_put_string(skb, NFTA_EXPR_NAME,
			   expr->ops->type->name))
		goto nla_put_failure;

	if (expr->ops->dump) {
		struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
		if (data == NULL)
			goto nla_put_failure;
		if (expr->ops->dump(skb, expr) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, data);
	}

	return skb->len;

nla_put_failure:
	return -1;
};

/* Result of parsing one expression attribute before instantiation. */
struct nft_expr_info {
	const struct nft_expr_ops	*ops;
	struct nlattr			*tb[NFT_EXPR_MAXATTR + 1];
};

/* Parse one NFTA_LIST_ELEM expression attribute: resolve the type
 * (takes a module reference — released on error here, otherwise owned
 * by the caller via info->ops) and pre-parse its nested data.
 */
static int nf_tables_expr_parse(const struct nft_ctx *ctx,
				const struct nlattr *nla,
				struct nft_expr_info *info)
{
	const struct nft_expr_type *type;
	const struct nft_expr_ops *ops;
	struct nlattr *tb[NFTA_EXPR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
	if (err < 0)
		return err;

	type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
	if (IS_ERR(type))
		return PTR_ERR(type);

	if (tb[NFTA_EXPR_DATA]) {
		err = nla_parse_nested(info->tb, type->maxattr,
				       tb[NFTA_EXPR_DATA], type->policy);
		if (err < 0)
			goto err1;
	} else
		memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));

	if (type->select_ops != NULL) {
		ops = type->select_ops(ctx,
				       (const struct nlattr * const *)info->tb);
		if (IS_ERR(ops)) {
			err = PTR_ERR(ops);
			goto err1;
		}
	} else
		ops = type->ops;

	info->ops = ops;
	return 0;

err1:
	module_put(type->owner);
	return err;
}

/* Initialize an expression instance from pre-parsed info. */
static int nf_tables_newexpr(const struct nft_ctx *ctx,
			     const struct nft_expr_info *info,
			     struct nft_expr *expr)
{
	const struct nft_expr_ops *ops = info->ops;
	int err;

	expr->ops = ops;
	if (ops->init) {
		err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
		if (err < 0)
			goto err1;
	}

	return 0;

err1:
	expr->ops = NULL;
	return err;
}

/* Tear down one expression and drop its type's module reference. */
static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
				   struct nft_expr *expr)
{
	if (expr->ops->destroy)
		expr->ops->destroy(ctx, expr);
	module_put(expr->ops->type->owner);
}

/*
 * Rules
 */

/* Find a rule in @chain by handle — linear scan. */
static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
						u64 handle)
{
	struct nft_rule *rule;

	// FIXME: this sucks
	list_for_each_entry(rule, &chain->rules, list) {
		if (handle == rule->handle)
			return rule;
	}

	return ERR_PTR(-ENOENT);
}

/* Find a rule by its NFTA_RULE_HANDLE attribute. */
static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
					      const struct nlattr *nla)
{
	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
}

static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
	[NFTA_RULE_TABLE]	= { .type = NLA_STRING },
	[NFTA_RULE_CHAIN]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
	[NFTA_RULE_HANDLE]	= { .type = NLA_U64 },
	[NFTA_RULE_EXPRESSIONS]	= { .type = NLA_NESTED },
	[NFTA_RULE_COMPAT]	= { .type = NLA_NESTED },
	[NFTA_RULE_POSITION]	= { .type = NLA_U64 },
	[NFTA_RULE_USERDATA]	= { .type = NLA_BINARY,
				    .len = NFT_USERDATA_MAXLEN },
};

/* Serialize one rule: handle, optional position (handle of the
 * preceding rule), nested expression list and userdata.
 */
static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
				    u32 portid, u32 seq, int event,
				    u32 flags, int family,
				    const struct nft_table *table,
				    const struct nft_chain *chain,
				    const struct nft_rule *rule)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	const struct nft_expr *expr, *next;
	struct nlattr *list;
	const struct nft_rule *prule;
	int type = event | NFNL_SUBSYS_NFTABLES << 8;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family	= family;
	nfmsg->version		= NFNETLINK_V0;
	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
		goto nla_put_failure;
	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
		goto nla_put_failure;

	if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
		prule = list_entry(rule->list.prev, struct nft_rule, list);
		if (nla_put_be64(skb, NFTA_RULE_POSITION,
				 cpu_to_be64(prule->handle)))
			goto nla_put_failure;
	}

	list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
	if (list == NULL)
		goto nla_put_failure;
	nft_rule_for_each_expr(expr, next, rule) {
		struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
		if (elem == NULL)
			goto nla_put_failure;
		if (nf_tables_fill_expr_info(skb, expr) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, elem);
	}
	nla_nest_end(skb, list);

	if (rule->ulen &&
	    nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a rule event to NFNLGRP_NFTABLES listeners (if any). */
static int nf_tables_rule_notify(const struct nft_ctx *ctx,
				 const struct nft_rule *rule,
				 int event)
{
	struct sk_buff *skb;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
				       event, 0, ctx->afi->family, ctx->table,
				       ctx->chain, rule);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
			     ctx->report, GFP_KERNEL);
err:
	if (err < 0) {
		nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
				  err);
	}
	return err;
}

/* Netlink dump callback: walk all rules of all chains under RCU,
 * skipping rules not active in the current generation.
 */
static int nf_tables_dump_rules(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	const struct nft_af_info *afi;
	const struct nft_table *table;
	const struct nft_chain *chain;
	const struct nft_rule *rule;
	unsigned int idx = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	int family = nfmsg->nfgen_family;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (family != NFPROTO_UNSPEC && family != afi->family)
			continue;

		list_for_each_entry_rcu(table, &afi->tables, list) {
			list_for_each_entry_rcu(chain, &table->chains, list) {
				list_for_each_entry_rcu(rule, &chain->rules, list) {
					if (!nft_rule_is_active(net, rule))
						goto cont;
					if (idx < s_idx)
						goto cont;
					if (idx > s_idx)
						memset(&cb->args[1], 0,
						       sizeof(cb->args) - sizeof(cb->args[0]));
					if (nf_tables_fill_rule_info(skb, net,
								     NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NFT_MSG_NEWRULE, NLM_F_MULTI | NLM_F_APPEND, afi->family, table, chain, rule) < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); cont: idx++; } } } } done: rcu_read_unlock(); cb->args[0] = idx; return skb->len; } static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); const struct nft_af_info *afi; const struct nft_table *table; const struct nft_chain *chain; const struct nft_rule *rule; struct sk_buff *skb2; struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; int err; if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nf_tables_dump_rules, }; return netlink_dump_start(nlsk, skb, nlh, &c); } afi = nf_tables_afinfo_lookup(net, family, false); if (IS_ERR(afi)) return PTR_ERR(afi); table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); if (IS_ERR(table)) return PTR_ERR(table); if (table->flags & NFT_TABLE_INACTIVE) return -ENOENT; chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); if (IS_ERR(chain)) return PTR_ERR(chain); if (chain->flags & NFT_CHAIN_INACTIVE) return -ENOENT; rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); if (IS_ERR(rule)) return PTR_ERR(rule); skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb2) return -ENOMEM; err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule); if (err < 0) goto err; return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); err: kfree_skb(skb2); return err; } static void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_expr *expr; /* * Careful: some expressions might not be initialized in case this * is called on error from nf_tables_newrule(). 
*/ expr = nft_expr_first(rule); while (expr->ops && expr != nft_expr_last(rule)) { nf_tables_expr_destroy(ctx, expr); expr = nft_expr_next(expr); } kfree(rule); } #define NFT_RULE_MAXEXPRS 128 static struct nft_expr_info *info; static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; struct net *net = sock_net(skb->sk); struct nft_table *table; struct nft_chain *chain; struct nft_rule *rule, *old_rule = NULL; struct nft_trans *trans = NULL; struct nft_expr *expr; struct nft_ctx ctx; struct nlattr *tmp; unsigned int size, i, n, ulen = 0; int err, rem; bool create; u64 handle, pos_handle; create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create); if (IS_ERR(afi)) return PTR_ERR(afi); table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); if (IS_ERR(table)) return PTR_ERR(table); chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); if (IS_ERR(chain)) return PTR_ERR(chain); if (nla[NFTA_RULE_HANDLE]) { handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE])); rule = __nf_tables_rule_lookup(chain, handle); if (IS_ERR(rule)) return PTR_ERR(rule); if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) old_rule = rule; else return -EOPNOTSUPP; } else { if (!create || nlh->nlmsg_flags & NLM_F_REPLACE) return -EINVAL; handle = nf_tables_alloc_handle(table); if (chain->use == UINT_MAX) return -EOVERFLOW; } if (nla[NFTA_RULE_POSITION]) { if (!(nlh->nlmsg_flags & NLM_F_CREATE)) return -EOPNOTSUPP; pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); old_rule = __nf_tables_rule_lookup(chain, pos_handle); if (IS_ERR(old_rule)) return PTR_ERR(old_rule); } nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); n = 0; size = 0; if (nla[NFTA_RULE_EXPRESSIONS]) { nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], 
rem) { err = -EINVAL; if (nla_type(tmp) != NFTA_LIST_ELEM) goto err1; if (n == NFT_RULE_MAXEXPRS) goto err1; err = nf_tables_expr_parse(&ctx, tmp, &info[n]); if (err < 0) goto err1; size += info[n].ops->size; n++; } } if (nla[NFTA_RULE_USERDATA]) ulen = nla_len(nla[NFTA_RULE_USERDATA]); err = -ENOMEM; rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); if (rule == NULL) goto err1; nft_rule_activate_next(net, rule); rule->handle = handle; rule->dlen = size; rule->ulen = ulen; if (ulen) nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); expr = nft_expr_first(rule); for (i = 0; i < n; i++) { err = nf_tables_newexpr(&ctx, &info[i], expr); if (err < 0) goto err2; info[i].ops = NULL; expr = nft_expr_next(expr); } if (nlh->nlmsg_flags & NLM_F_REPLACE) { if (nft_rule_is_active_next(net, old_rule)) { trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, old_rule); if (trans == NULL) { err = -ENOMEM; goto err2; } nft_rule_deactivate_next(net, old_rule); chain->use--; list_add_tail_rcu(&rule->list, &old_rule->list); } else { err = -ENOENT; goto err2; } } else if (nlh->nlmsg_flags & NLM_F_APPEND) if (old_rule) list_add_rcu(&rule->list, &old_rule->list); else list_add_tail_rcu(&rule->list, &chain->rules); else { if (old_rule) list_add_tail_rcu(&rule->list, &old_rule->list); else list_add_rcu(&rule->list, &chain->rules); } if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { err = -ENOMEM; goto err3; } chain->use++; return 0; err3: list_del_rcu(&rule->list); if (trans) { list_del_rcu(&nft_trans_rule(trans)->list); nft_rule_clear(net, nft_trans_rule(trans)); nft_trans_destroy(trans); chain->use++; } err2: nf_tables_rule_destroy(&ctx, rule); err1: for (i = 0; i < n; i++) { if (info[i].ops != NULL) module_put(info[i].ops->type->owner); } return err; } static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; 
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_chain *chain = NULL;
	struct nft_rule *rule;
	int family = nfmsg->nfgen_family, err = 0;
	struct nft_ctx ctx;

	afi = nf_tables_afinfo_lookup(net, family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (table->flags & NFT_TABLE_INACTIVE)
		return -ENOENT;

	if (nla[NFTA_RULE_CHAIN]) {
		chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
	}

	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);

	if (chain) {
		if (nla[NFTA_RULE_HANDLE]) {
			/* delete exactly one rule */
			rule = nf_tables_rule_lookup(chain,
						     nla[NFTA_RULE_HANDLE]);
			if (IS_ERR(rule))
				return PTR_ERR(rule);

			err = nft_delrule(&ctx, rule);
		} else {
			/* no handle: flush the whole chain */
			err = nft_delrule_by_chain(&ctx);
		}
	} else {
		/* no chain: flush every chain in the table */
		list_for_each_entry(chain, &table->chains, list) {
			ctx.chain = chain;
			err = nft_delrule_by_chain(&ctx);
			if (err < 0)
				break;
		}
	}

	return err;
}

/*
 * Sets
 */

/* Registry of available set backend implementations. */
static LIST_HEAD(nf_tables_set_ops);

/* Register a set backend; serialized by the nfnetlink subsystem mutex. */
int nft_register_set(struct nft_set_ops *ops)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	return 0;
}
EXPORT_SYMBOL_GPL(nft_register_set);

/* Unregister a set backend; serialized by the nfnetlink subsystem mutex. */
void nft_unregister_set(struct nft_set_ops *ops)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	list_del_rcu(&ops->list);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_unregister_set);

/*
 * Select a set implementation based on the data characteristics and the
 * given policy. The total memory use might not be known if no size is
 * given, in that case the amount of memory per element is used.
 */
static const struct nft_set_ops *
nft_select_set_ops(const struct nlattr * const nla[],
		   const struct nft_set_desc *desc,
		   enum nft_set_policies policy)
{
	const struct nft_set_ops *ops, *bops;
	struct nft_set_estimate est, best;
	u32 features;

#ifdef CONFIG_MODULES
	/* no backend loaded yet: drop the mutex, try to autoload one, and
	 * ask the caller to restart (-EAGAIN) if that produced a backend */
	if (list_empty(&nf_tables_set_ops)) {
		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
		request_module("nft-set");
		nfnl_lock(NFNL_SUBSYS_NFTABLES);
		if (!list_empty(&nf_tables_set_ops))
			return ERR_PTR(-EAGAIN);
	}
#endif
	features = 0;
	if (nla[NFTA_SET_FLAGS] != NULL) {
		features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
		/* only these flags influence backend capability matching */
		features &= NFT_SET_INTERVAL | NFT_SET_MAP;
	}

	bops = NULL;
	best.size = ~0;
	best.class = ~0;

	list_for_each_entry(ops, &nf_tables_set_ops, list) {
		if ((ops->features & features) != features)
			continue;
		if (!ops->estimate(desc, features, &est))
			continue;

		/* keep the candidate only if it beats the current best under
		 * the requested policy (lookup class vs. memory size) */
		switch (policy) {
		case NFT_SET_POL_PERFORMANCE:
			if (est.class < best.class)
				break;
			if (est.class == best.class && est.size < best.size)
				break;
			continue;
		case NFT_SET_POL_MEMORY:
			if (est.size < best.size)
				break;
			if (est.size == best.size && est.class < best.class)
				break;
			continue;
		default:
			break;
		}

		if (!try_module_get(ops->owner))
			continue;
		if (bops != NULL)
			module_put(bops->owner);

		bops = ops;
		best = est;
	}

	if (bops != NULL)
		return bops;

	return ERR_PTR(-EOPNOTSUPP);
}

/* Netlink attribute validation policy for set messages. */
static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
	[NFTA_SET_TABLE]	= { .type = NLA_STRING },
	[NFTA_SET_NAME]		= { .type = NLA_STRING,
				    .len = IFNAMSIZ - 1 },
	[NFTA_SET_FLAGS]	= { .type = NLA_U32 },
	[NFTA_SET_KEY_TYPE]	= { .type = NLA_U32 },
	[NFTA_SET_KEY_LEN]	= { .type = NLA_U32 },
	[NFTA_SET_DATA_TYPE]	= { .type = NLA_U32 },
	[NFTA_SET_DATA_LEN]	= { .type = NLA_U32 },
	[NFTA_SET_POLICY]	= { .type = NLA_U32 },
	[NFTA_SET_DESC]		= { .type = NLA_NESTED },
	[NFTA_SET_ID]		= { .type = NLA_U32 },
};

/* Policy for the nested NFTA_SET_DESC attribute. */
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
	[NFTA_SET_DESC_SIZE]	= { .type = NLA_U32 },
};

/* Initialize a nft_ctx from the table-level attributes of a set request
 * (signature continues below this hunk). */
static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, const struct
							      sk_buff *skb,
				     const struct nlmsghdr *nlh,
				     const struct nlattr * const nla[])
{
	struct net *net = sock_net(skb->sk);
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi = NULL;
	struct nft_table *table = NULL;

	/* family and table are both optional here: dumps may span all of
	 * them, but a named table requires a concrete family */
	if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
		afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
		if (IS_ERR(afi))
			return PTR_ERR(afi);
	}

	if (nla[NFTA_SET_TABLE] != NULL) {
		if (afi == NULL)
			return -EAFNOSUPPORT;

		table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
		if (IS_ERR(table))
			return PTR_ERR(table);
		if (table->flags & NFT_TABLE_INACTIVE)
			return -ENOENT;
	}

	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
	return 0;
}

/* Look up a set in @table by the NFTA_SET_NAME string attribute. */
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
				     const struct nlattr *nla)
{
	struct nft_set *set;

	if (nla == NULL)
		return ERR_PTR(-EINVAL);

	list_for_each_entry(set, &table->sets, list) {
		if (!nla_strcmp(nla, set->name))
			return set;
	}
	return ERR_PTR(-ENOENT);
}

/* Look up a not-yet-committed set by the transaction-local NFTA_SET_ID,
 * scanning pending NFT_MSG_NEWSET entries on the commit list. */
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
					  const struct nlattr *nla)
{
	struct nft_trans *trans;
	u32 id = ntohl(nla_get_be32(nla));

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->msg_type == NFT_MSG_NEWSET &&
		    id == nft_trans_set_id(trans))
			return nft_trans_set(trans);
	}
	return ERR_PTR(-ENOENT);
}

/* Expand a "name%d" template into a unique set name within ctx->table,
 * or verify a literal name is unused. Uses a one-page bitmap per window of
 * BITS_PER_BYTE * PAGE_SIZE candidate numbers. */
static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
				    const char *name)
{
	const struct nft_set *i;
	const char *p;
	unsigned long *inuse;
	unsigned int n = 0, min = 0;

	p = strnchr(name, IFNAMSIZ, '%');
	if (p != NULL) {
		/* only a single trailing %d conversion is permitted, so the
		 * template cannot be abused as a general format string */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (inuse == NULL)
			return -ENOMEM;
cont:
		list_for_each_entry(i, &ctx->table->sets, list) {
			int tmp;

			/* parse existing names against the template to mark
			 * numbers already taken in the current window */
			if (!sscanf(i->name, name, &tmp))
				continue;
			if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
				continue;

			set_bit(tmp - min, inuse);
		}

		n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
		if (n >= BITS_PER_BYTE * PAGE_SIZE) {
			/* window exhausted: advance and rescan */
			min += BITS_PER_BYTE
					* PAGE_SIZE;
			memset(inuse, 0, PAGE_SIZE);
			goto cont;
		}
		free_page((unsigned long)inuse);
	}

	snprintf(set->name, sizeof(set->name), name, min + n);
	/* final collision check for literal (non-template) names */
	list_for_each_entry(i, &ctx->table->sets, list) {
		if (!strcmp(set->name, i->name))
			return -ENFILE;
	}
	return 0;
}

/* Build one NFT_MSG_*SET netlink message describing @set into @skb.
 * Returns the nlmsg_end() result, or -1 after trimming on overflow. */
static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
			      const struct nft_set *set, u16 event, u16 flags)
{
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *desc;
	u32 portid = ctx->portid;
	u32 seq = ctx->seq;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = ctx->afi->family;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
		goto nla_put_failure;
	if (set->flags != 0)
		if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
			goto nla_put_failure;

	if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
		goto nla_put_failure;
	/* data type/len only exist for map-type sets */
	if (set->flags & NFT_SET_MAP) {
		if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
			goto nla_put_failure;
	}

	/* performance is the default policy, so only report deviations */
	if (set->policy != NFT_SET_POL_PERFORMANCE) {
		if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
			goto nla_put_failure;
	}

	desc = nla_nest_start(skb, NFTA_SET_DESC);
	if (desc == NULL)
		goto nla_put_failure;
	if (set->size &&
	    nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
		goto nla_put_failure;
	nla_nest_end(skb, desc);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a set event to NFNLGRP_NFTABLES listeners
 * (body continues below this hunk). */
static int nf_tables_set_notify(const struct nft_ctx *ctx,
				const struct nft_set *set,
				int event, gfp_t gfp_flags)
{
	struct sk_buff *skb;
	u32 portid = ctx->portid;
	int err;

	if (!ctx->report &&
	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, gfp_flags);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_set(skb, ctx, set, event, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
			     ctx->report, gfp_flags);
err:
	if (err < 0)
		nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
	return err;
}

/* Netlink dump callback for sets: walk families/tables/sets under RCU.
 * Resume state lives in cb->args: [0] per-table set index, [1] done flag,
 * [2] table cursor, [3] family cursor. The filter context (family/table
 * restrictions from the request) is carried in cb->data. */
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nft_set *set;
	unsigned int idx, s_idx = cb->args[0];
	struct nft_af_info *afi;
	struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
	struct net *net = sock_net(skb->sk);
	int cur_family = cb->args[3];
	struct nft_ctx *ctx = cb->data, ctx_set;

	if (cb->args[1])
		return skb->len;

	rcu_read_lock();
	cb->seq = net->nft.base_seq;

	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
		if (ctx->afi && ctx->afi != afi)
			continue;

		/* skip forward to the family where the last dump stopped */
		if (cur_family) {
			if (afi->family != cur_family)
				continue;

			cur_family = 0;
		}
		list_for_each_entry_rcu(table, &afi->tables, list) {
			if (ctx->table && ctx->table != table)
				continue;

			/* likewise skip to the saved table cursor */
			if (cur_table) {
				if (cur_table != table)
					continue;

				cur_table = NULL;
			}
			idx = 0;
			list_for_each_entry_rcu(set, &table->sets, list) {
				if (idx < s_idx)
					goto cont;

				/* fill_set reports names from the ctx, so
				 * point a copy at the current table/family */
				ctx_set = *ctx;
				ctx_set.table = table;
				ctx_set.afi = afi;
				if (nf_tables_fill_set(skb, &ctx_set, set,
						       NFT_MSG_NEWSET,
						       NLM_F_MULTI) < 0) {
					cb->args[0] = idx;
					cb->args[2] = (unsigned long) table;
					cb->args[3] = afi->family;
					goto done;
				}
				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
				idx++;
			}
			if (s_idx)
				s_idx = 0;
		}
	}
	cb->args[1] = 1;
done:
	rcu_read_unlock();
	return skb->len;
}

/* Dump teardown: free the filter context allocated in nf_tables_getset(). */
static int nf_tables_dump_sets_done(struct netlink_callback *cb)
{
	kfree(cb->data);
	return 0;
}

/* NFT_MSG_GETSET handler: start a filtered dump for NLM_F_DUMP, otherwise
 * unicast the description of a single named set. */
static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nft_set *set;
	struct nft_ctx ctx;
	struct sk_buff *skb2;
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	int err;

	/* Verify existence before starting dump */
	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
	if (err < 0)
		return err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_sets,
			.done = nf_tables_dump_sets_done,
		};
		struct nft_ctx *ctx_dump;

		/* heap copy of the ctx outlives this request; freed by the
		 * .done callback above */
		ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_KERNEL);
		if (ctx_dump == NULL)
			return -ENOMEM;

		*ctx_dump = ctx;
		c.data = ctx_dump;

		return netlink_dump_start(nlsk, skb, nlh, &c);
	}

	/* Only accept unspec with dump */
	if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
		return -EAFNOSUPPORT;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb2 == NULL)
		return -ENOMEM;

	err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
	if (err < 0)
		goto err;

	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);

err:
	kfree_skb(skb2);
	return err;
}

/* Parse the nested NFTA_SET_DESC attribute into @desc (currently only the
 * element-count hint NFTA_SET_DESC_SIZE). @ctx is not dereferenced. */
static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
				    struct nft_set_desc *desc,
				    const struct nlattr *nla)
{
	struct nlattr *da[NFTA_SET_DESC_MAX + 1];
	int err;

	err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy);
	if (err < 0)
		return err;

	if (da[NFTA_SET_DESC_SIZE] != NULL)
		desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));

	return 0;
}

/* NFT_MSG_NEWSET handler: validate key/data typing and flags, pick a
 * backend, and create the set (body continues below this hunk). */
static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	const struct nft_set_ops *ops;
	struct nft_af_info *afi;
	struct net *net = sock_net(skb->sk);
	struct nft_table *table;
	struct nft_set *set;
	struct nft_ctx ctx;
	char name[IFNAMSIZ];
	unsigned int size;
	bool create;
	u32 ktype, dtype, flags, policy;
	struct nft_set_desc desc;
	int err;

	if (nla[NFTA_SET_TABLE] == NULL ||
	    nla[NFTA_SET_NAME] == NULL ||
	    nla[NFTA_SET_KEY_LEN] == NULL ||
	    nla[NFTA_SET_ID] == NULL)
		return
		       -EINVAL;

	memset(&desc, 0, sizeof(desc));

	/* key type defaults to a plain value; reserved type space rejected */
	ktype = NFT_DATA_VALUE;
	if (nla[NFTA_SET_KEY_TYPE] != NULL) {
		ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
		if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
			return -EINVAL;
	}

	desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
	if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
		return -EINVAL;

	flags = 0;
	if (nla[NFTA_SET_FLAGS] != NULL) {
		flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
		if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
			      NFT_SET_INTERVAL | NFT_SET_MAP))
			return -EINVAL;
	}

	/* data attributes are valid only for maps, and NFT_SET_MAP in turn
	 * requires a data type; verdict maps have an implicit data length */
	dtype = 0;
	if (nla[NFTA_SET_DATA_TYPE] != NULL) {
		if (!(flags & NFT_SET_MAP))
			return -EINVAL;

		dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
		if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
		    dtype != NFT_DATA_VERDICT)
			return -EINVAL;

		if (dtype != NFT_DATA_VERDICT) {
			if (nla[NFTA_SET_DATA_LEN] == NULL)
				return -EINVAL;
			desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
			if (desc.dlen == 0 ||
			    desc.dlen > FIELD_SIZEOF(struct nft_data, data))
				return -EINVAL;
		} else
			desc.dlen = sizeof(struct nft_data);
	} else if (flags & NFT_SET_MAP)
		return -EINVAL;

	policy = NFT_SET_POL_PERFORMANCE;
	if (nla[NFTA_SET_POLICY] != NULL)
		policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));

	if (nla[NFTA_SET_DESC] != NULL) {
		/* ctx is not initialized yet at this point;
		 * nf_tables_set_desc_parse() does not dereference it */
		err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
		if (err < 0)
			return err;
	}

	create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;

	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);

	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);

	set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set)) {
		if (PTR_ERR(set) != -ENOENT)
			return PTR_ERR(set);
		set = NULL;
	}

	/* existing set: creation is idempotent unless EXCL/REPLACE given */
	if (set != NULL) {
		if (nlh->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (nlh->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;
		return 0;
	}

	if (!(nlh->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;

	/* choose a backend; takes a module reference released on error */
	ops = nft_select_set_ops(nla, &desc, policy);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	size = 0;
	if (ops->privsize != NULL)
		size = ops->privsize(nla);

	err = -ENOMEM;
	set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
	if (set == NULL)
		goto err1;

	nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
	err = nf_tables_set_alloc_name(&ctx, set, name);
	if (err < 0)
		goto err2;

	INIT_LIST_HEAD(&set->bindings);
	set->ops = ops;
	set->ktype = ktype;
	set->klen = desc.klen;
	set->dtype = dtype;
	set->dlen = desc.dlen;
	set->flags = flags;
	set->size = desc.size;
	set->policy = policy;

	err = ops->init(set, &desc, nla);
	if (err < 0)
		goto err2;

	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
	if (err < 0)
		goto err2;

	list_add_tail_rcu(&set->list, &table->sets);
	table->use++;
	return 0;

err2:
	kfree(set);
err1:
	module_put(ops->owner);
	return err;
}

/* Release a set's backend state, backend module reference and memory. */
static void nft_set_destroy(struct nft_set *set)
{
	set->ops->destroy(set);
	module_put(set->ops->owner);
	kfree(set);
}

/* Unlink a set, notify listeners of the deletion, then destroy it. */
static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
	list_del_rcu(&set->list);
	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
	nft_set_destroy(set);
}

/* NFT_MSG_DELSET handler (body continues below this hunk). */
static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
			    const struct nlmsghdr *nlh,
			    const struct nlattr * const nla[])
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_set *set;
	struct nft_ctx ctx;
	int err;

	if
	   (nfmsg->nfgen_family == NFPROTO_UNSPEC)
		return -EAFNOSUPPORT;
	if (nla[NFTA_SET_TABLE] == NULL)
		return -EINVAL;

	err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;
	/* a set still referenced by expressions cannot be deleted */
	if (!list_empty(&set->bindings))
		return -EBUSY;

	return nft_delset(&ctx, set);
}

/* Set walker callback used at bind time: validate that each element's
 * mapped data (e.g. a jump verdict) is legal for the binding chain. */
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
					const struct nft_set *set,
					const struct nft_set_iter *iter,
					const struct nft_set_elem *elem)
{
	enum nft_registers dreg;

	dreg = nft_type_to_reg(set->dtype);
	return nft_validate_data_load(ctx, dreg, &elem->data,
				      set->dtype == NFT_DATA_VERDICT ?
				      NFT_DATA_VERDICT : NFT_DATA_VALUE);
}

/* Bind a set to an expression in ctx->chain. For maps, every element is
 * re-validated against the new chain unless an identical binding already
 * exists. Anonymous sets allow exactly one binding. */
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
		       struct nft_set_binding *binding)
{
	struct nft_set_binding *i;
	struct nft_set_iter iter;

	if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
		return -EBUSY;

	if (set->flags & NFT_SET_MAP) {
		/* If the set is already bound to the same chain all
		 * jumps are already validated for that chain.
		 */
		list_for_each_entry(i, &set->bindings, list) {
			if (i->chain == binding->chain)
				goto bind;
		}

		iter.skip = 0;
		iter.count = 0;
		iter.err = 0;
		iter.fn = nf_tables_bind_check_setelem;

		set->ops->walk(ctx, set, &iter);
		if (iter.err < 0) {
			/* Destroy anonymous sets if binding fails */
			if (set->flags & NFT_SET_ANONYMOUS)
				nf_tables_set_destroy(ctx, set);

			return iter.err;
		}
	}
bind:
	binding->chain = ctx->chain;
	list_add_tail_rcu(&binding->list, &set->bindings);
	return 0;
}

/* Drop one binding; an anonymous set is destroyed with its last binding
 * (unless it is already marked inactive by the transaction machinery). */
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
			  struct nft_set_binding *binding)
{
	list_del_rcu(&binding->list);

	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
	    !(set->flags & NFT_SET_INACTIVE))
		nf_tables_set_destroy(ctx, set);
}

/*
 * Set elements
 */

/* Policy for one nested set element. */
static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
	[NFTA_SET_ELEM_KEY]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_DATA]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_FLAGS]	= { .type = NLA_U32 },
};

/* Policy for the element-list container message. */
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
	[NFTA_SET_ELEM_LIST_TABLE]	= { .type = NLA_STRING },
	[NFTA_SET_ELEM_LIST_SET]	= { .type = NLA_STRING },
	[NFTA_SET_ELEM_LIST_ELEMENTS]	= { .type = NLA_NESTED },
	[NFTA_SET_ELEM_LIST_SET_ID]	= { .type = NLA_U32 },
};

/* Initialize a nft_ctx from element-list attributes. @trans permits
 * addressing a table that is still pending in the current transaction. */
static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
				      const struct sk_buff *skb,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nla[],
				      bool trans)
{
	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nft_af_info *afi;
	struct nft_table *table;
	struct net *net = sock_net(skb->sk);

	afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
	if (IS_ERR(afi))
		return PTR_ERR(afi);

	table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (!trans && (table->flags & NFT_TABLE_INACTIVE))
		return -ENOENT;

	nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
	return 0;
}

/* Serialize one set element as a nested NFTA_LIST_ELEM
 * (signature continues below this hunk). */
static int nf_tables_fill_setelem(struct sk_buff *skb, const struct
							     nft_set *set,
				  const struct nft_set_elem *elem)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NFTA_LIST_ELEM);
	if (nest == NULL)
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
			  set->klen) < 0)
		goto nla_put_failure;

	/* interval-end sentinels carry no mapped data */
	if (set->flags & NFT_SET_MAP &&
	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
	    nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
			  set->dtype == NFT_DATA_VERDICT ?
			  NFT_DATA_VERDICT : NFT_DATA_VALUE,
			  set->dlen) < 0)
		goto nla_put_failure;

	if (elem->flags != 0)
		if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
			goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	/* roll back only this element, not the whole message */
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

/* Carries the skb through the backend's element walker. */
struct nft_set_dump_args {
	const struct netlink_callback	*cb;
	struct nft_set_iter		iter;
	struct sk_buff			*skb;
};

/* Walker callback: emit one element into the dump skb. */
static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
				  const struct nft_set *set,
				  const struct nft_set_iter *iter,
				  const struct nft_set_elem *elem)
{
	struct nft_set_dump_args *args;

	args = container_of(iter, struct nft_set_dump_args, iter);
	return nf_tables_fill_setelem(args->skb, set, elem);
}

/* Netlink dump callback for set elements: re-resolves the set from the
 * original request on each pass and resumes from cb->args[0] elements. */
static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nft_set *set;
	struct nft_set_dump_args args;
	struct nft_ctx ctx;
	struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	u32 portid, seq;
	int event, err;

	err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla,
			  NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy);
	if (err < 0)
		return err;

	err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
					 false);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	event = NFT_MSG_NEWSETELEM;
	event |= NFNL_SUBSYS_NFTABLES << 8;
	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			NLM_F_MULTI);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = ctx.afi->family;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
	if (nest == NULL)
		goto nla_put_failure;

	args.cb = cb;
	args.skb = skb;
	args.iter.skip = cb->args[0];
	args.iter.count = 0;
	args.iter.err = 0;
	args.iter.fn = nf_tables_dump_setelem;
	set->ops->walk(&ctx, set, &args.iter);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);

	if (args.iter.err && args.iter.err != -EMSGSIZE)
		return args.iter.err;
	/* no additional element fitted: signal end of dump */
	if (args.iter.count == cb->args[0])
		return 0;

	cb->args[0] = args.iter.count;
	return skb->len;

nla_put_failure:
	return -ENOSPC;
}

/* NFT_MSG_GETSETELEM handler: only the dump form is implemented. */
static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				const struct nlattr * const nla[])
{
	const struct nft_set *set;
	struct nft_ctx ctx;
	int err;

	err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
	if (err < 0)
		return err;

	set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
	if (IS_ERR(set))
		return PTR_ERR(set);
	if (set->flags & NFT_SET_INACTIVE)
		return -ENOENT;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = nf_tables_dump_set,
		};
		return netlink_dump_start(nlsk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}

/* Build a single-element NFT_MSG_*SETELEM notification message
 * (body continues below this hunk). */
static int nf_tables_fill_setelem_info(struct sk_buff *skb,
				       const struct nft_ctx *ctx, u32 seq,
				       u32 portid, int event, u16 flags,
				       const struct nft_set *set,
				       const struct nft_set_elem *elem)
{
	struct nfgenmsg *nfmsg;
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	int err;

	event |= NFNL_SUBSYS_NFTABLES << 8;
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
			flags);
	if (nlh == NULL)
		goto nla_put_failure;

	nfmsg =
	       nlmsg_data(nlh);
	nfmsg->nfgen_family = ctx->afi->family;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);

	if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
		goto nla_put_failure;
	if (nla_put_string(skb, NFTA_SET_NAME, set->name))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
	if (nest == NULL)
		goto nla_put_failure;

	err = nf_tables_fill_setelem(skb, set, elem);
	if (err < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_trim(skb, nlh);
	return -1;
}

/* Multicast a set-element event to NFNLGRP_NFTABLES listeners. */
static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
				    const struct nft_set *set,
				    const struct nft_set_elem *elem,
				    int event, u16 flags)
{
	struct net *net = ctx->net;
	u32 portid = ctx->portid;
	struct sk_buff *skb;
	int err;

	if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
		return 0;

	err = -ENOBUFS;
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		goto err;

	err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
					  set, elem);
	if (err < 0) {
		kfree_skb(skb);
		goto err;
	}

	err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
			     GFP_KERNEL);
err:
	if (err < 0)
		nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
	return err;
}

/* Allocate a transaction record for a set-element operation and attach
 * the owning set to it. */
static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
					      int msg_type,
					      struct nft_set *set)
{
	struct nft_trans *trans;

	trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
	if (trans == NULL)
		return NULL;

	nft_trans_elem_set(trans) = set;
	return trans;
}

/* Parse and validate one NFTA_LIST_ELEM attribute, insert the element into
 * @set and queue an NFT_MSG_NEWSETELEM transaction.
 * Errors: -ENFILE (set full), -EINVAL (malformed/typing), -EEXIST
 * (duplicate key), plus backend insert errors. */
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
			    const struct nlattr *attr)
{
	struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
	struct nft_data_desc d1, d2;
	struct nft_set_elem elem;
	struct nft_set_binding *binding;
	enum nft_registers dreg;
	struct nft_trans *trans;
	int err;

	if (set->size && set->nelems == set->size)
		return -ENFILE;

	err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
			       nft_set_elem_policy);
	if (err < 0)
		return err;

	if (nla[NFTA_SET_ELEM_KEY] == NULL)
		return -EINVAL;

	elem.flags = 0;
	if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
		elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
		if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
			return -EINVAL;
	}

	/* maps require data except on interval-end sentinels; plain sets
	 * must not carry data at all */
	if (set->flags & NFT_SET_MAP) {
		if (nla[NFTA_SET_ELEM_DATA] == NULL &&
		    !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
			return -EINVAL;
		if (nla[NFTA_SET_ELEM_DATA] != NULL &&
		    elem.flags & NFT_SET_ELEM_INTERVAL_END)
			return -EINVAL;
	} else {
		if (nla[NFTA_SET_ELEM_DATA] != NULL)
			return -EINVAL;
	}

	err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
	if (err < 0)
		goto err1;
	err = -EINVAL;
	if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
		goto err2;

	err = -EEXIST;
	if (set->ops->get(set, &elem) == 0)
		goto err2;

	if (nla[NFTA_SET_ELEM_DATA] != NULL) {
		err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
		if (err < 0)
			goto err2;

		err = -EINVAL;
		if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
			goto err3;

		/* re-validate the data against every chain this set is
		 * already bound to (jump targets etc.) */
		dreg = nft_type_to_reg(set->dtype);
		list_for_each_entry(binding, &set->bindings, list) {
			struct nft_ctx bind_ctx = {
				.afi	= ctx->afi,
				.table	= ctx->table,
				.chain	= (struct nft_chain *)binding->chain,
			};

			err = nft_validate_data_load(&bind_ctx, dreg,
						     &elem.data, d2.type);
			if (err < 0)
				goto err3;
		}
	}

	/* NOTE(review): on allocation failure here, err still holds the
	 * value from a prior check (not -ENOMEM) — confirm intended. */
	trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
	if (trans == NULL)
		goto err3;

	err = set->ops->insert(set, &elem);
	if (err < 0)
		goto err4;

	nft_trans_elem(trans) = elem;
	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
	return 0;

err4:
	kfree(trans);
err3:
	if (nla[NFTA_SET_ELEM_DATA] != NULL)
		nft_data_uninit(&elem.data, d2.type);
err2:
	nft_data_uninit(&elem.key, d1.type);
err1:
	return err;
}

/* NFT_MSG_NEWSETELEM handler: iterate the nested element list
 * (body continues below this hunk). */
static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
				const struct nlmsghdr *nlh,
				const struct nlattr * const nla[])
{
	struct net *net = sock_net(skb->sk);
	const struct nlattr *attr;
	struct nft_set *set;
	struct nft_ctx ctx;
	int rem, err = 0;

	if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
		return
-EINVAL; err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); if (err < 0) return err; set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); if (IS_ERR(set)) { if (nla[NFTA_SET_ELEM_LIST_SET_ID]) { set = nf_tables_set_lookup_byid(net, nla[NFTA_SET_ELEM_LIST_SET_ID]); } if (IS_ERR(set)) return PTR_ERR(set); } if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) return -EBUSY; nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { err = nft_add_set_elem(&ctx, set, attr); if (err < 0) break; set->nelems++; } return err; } static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, const struct nlattr *attr) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; struct nft_data_desc desc; struct nft_set_elem elem; struct nft_trans *trans; int err; err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr, nft_set_elem_policy); if (err < 0) goto err1; err = -EINVAL; if (nla[NFTA_SET_ELEM_KEY] == NULL) goto err1; err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]); if (err < 0) goto err1; err = -EINVAL; if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) goto err2; err = set->ops->get(set, &elem); if (err < 0) goto err2; trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set); if (trans == NULL) { err = -ENOMEM; goto err2; } nft_trans_elem(trans) = elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; err2: nft_data_uninit(&elem.key, desc.type); err1: return err; } static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; int rem, err = 0; if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL; err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); if (err < 0) return err; set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); if (IS_ERR(set)) return PTR_ERR(set); if (!list_empty(&set->bindings) && set->flags & 
NFT_SET_CONSTANT) return -EBUSY; nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { err = nft_del_setelem(&ctx, set, attr); if (err < 0) break; set->nelems--; } return err; } static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWGEN; nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); if (nlh == NULL) goto nla_put_failure; nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = AF_UNSPEC; nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(net->nft.base_seq & 0xffff); if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq))) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_trim(skb, nlh); return -EMSGSIZE; } static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) { struct nlmsghdr *nlh = nlmsg_hdr(skb); struct sk_buff *skb2; int err; if (nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return 0; err = -ENOBUFS; skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (skb2 == NULL) goto err; err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) { kfree_skb(skb2); goto err; } err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL); err: if (err < 0) { nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES, err); } return err; } static int nf_tables_getgen(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { struct net *net = sock_net(skb->sk); struct sk_buff *skb2; int err; skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (skb2 == NULL) return -ENOMEM; err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) goto err; return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); err: kfree_skb(skb2); return err; } static const struct nfnl_callback 
nf_tables_cb[NFT_MSG_MAX] = { [NFT_MSG_NEWTABLE] = { .call_batch = nf_tables_newtable, .attr_count = NFTA_TABLE_MAX, .policy = nft_table_policy, }, [NFT_MSG_GETTABLE] = { .call = nf_tables_gettable, .attr_count = NFTA_TABLE_MAX, .policy = nft_table_policy, }, [NFT_MSG_DELTABLE] = { .call_batch = nf_tables_deltable, .attr_count = NFTA_TABLE_MAX, .policy = nft_table_policy, }, [NFT_MSG_NEWCHAIN] = { .call_batch = nf_tables_newchain, .attr_count = NFTA_CHAIN_MAX, .policy = nft_chain_policy, }, [NFT_MSG_GETCHAIN] = { .call = nf_tables_getchain, .attr_count = NFTA_CHAIN_MAX, .policy = nft_chain_policy, }, [NFT_MSG_DELCHAIN] = { .call_batch = nf_tables_delchain, .attr_count = NFTA_CHAIN_MAX, .policy = nft_chain_policy, }, [NFT_MSG_NEWRULE] = { .call_batch = nf_tables_newrule, .attr_count = NFTA_RULE_MAX, .policy = nft_rule_policy, }, [NFT_MSG_GETRULE] = { .call = nf_tables_getrule, .attr_count = NFTA_RULE_MAX, .policy = nft_rule_policy, }, [NFT_MSG_DELRULE] = { .call_batch = nf_tables_delrule, .attr_count = NFTA_RULE_MAX, .policy = nft_rule_policy, }, [NFT_MSG_NEWSET] = { .call_batch = nf_tables_newset, .attr_count = NFTA_SET_MAX, .policy = nft_set_policy, }, [NFT_MSG_GETSET] = { .call = nf_tables_getset, .attr_count = NFTA_SET_MAX, .policy = nft_set_policy, }, [NFT_MSG_DELSET] = { .call_batch = nf_tables_delset, .attr_count = NFTA_SET_MAX, .policy = nft_set_policy, }, [NFT_MSG_NEWSETELEM] = { .call_batch = nf_tables_newsetelem, .attr_count = NFTA_SET_ELEM_LIST_MAX, .policy = nft_set_elem_list_policy, }, [NFT_MSG_GETSETELEM] = { .call = nf_tables_getsetelem, .attr_count = NFTA_SET_ELEM_LIST_MAX, .policy = nft_set_elem_list_policy, }, [NFT_MSG_DELSETELEM] = { .call_batch = nf_tables_delsetelem, .attr_count = NFTA_SET_ELEM_LIST_MAX, .policy = nft_set_elem_list_policy, }, [NFT_MSG_GETGEN] = { .call = nf_tables_getgen, }, }; static void nft_chain_commit_update(struct nft_trans *trans) { struct nft_base_chain *basechain; if (nft_trans_chain_name(trans)[0]) 
strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));

	/* Only base chains carry stats and a policy; done for non-base chains. */
	if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
		return;

	basechain = nft_base_chain(trans->ctx.chain);
	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));

	/* Only accept the two valid base-chain policies; anything else is
	 * silently ignored here (validation presumably happened earlier —
	 * TODO confirm at the netlink parsing site, not visible in this chunk).
	 */
	switch (nft_trans_chain_policy(trans)) {
	case NF_DROP:
	case NF_ACCEPT:
		basechain->policy = nft_trans_chain_policy(trans);
		break;
	}
}

/*
 * Free objects whose deletion has just been committed, then release the
 * transaction record itself.  Only called after synchronize_rcu(), so no
 * packet-path reader can still see these objects.
 */
static void nf_tables_commit_release(struct nft_trans *trans)
{
	switch (trans->msg_type) {
	case NFT_MSG_DELTABLE:
		nf_tables_table_destroy(&trans->ctx);
		break;
	case NFT_MSG_DELCHAIN:
		nf_tables_chain_destroy(trans->ctx.chain);
		break;
	case NFT_MSG_DELRULE:
		nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
		break;
	case NFT_MSG_DELSET:
		nft_set_destroy(nft_trans_set(trans));
		break;
	}
	kfree(trans);
}

/*
 * Commit phase of a batched netlink transaction: walk the per-netns
 * commit list, activate new objects, send notifications, and finally
 * (after an RCU grace period) free deleted objects.
 */
static int nf_tables_commit(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct nft_trans *trans, *next;
	struct nft_trans_elem *te;

	/* Bump generation counter, invalidate any dump in progress */
	while (++net->nft.base_seq == 0);

	/* A new generation has just started */
	net->nft.gencursor = gencursor_next(net);

	/* Make sure all packets have left the previous generation before
	 * purging old rules.
*/ if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS && !list_empty(&nft_trans_set(trans)->bindings)) trans->ctx.table->use--; nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); nft_trans_destroy(trans); break; case NFT_MSG_DELSET: nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_DELSET, GFP_KERNEL); break; case NFT_MSG_NEWSETELEM: nf_tables_setelem_notify(&trans->ctx, nft_trans_elem_set(trans), &nft_trans_elem(trans), NFT_MSG_NEWSETELEM, 0); nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: te = (struct nft_trans_elem *)trans->data; nf_tables_setelem_notify(&trans->ctx, te->set, &te->elem, NFT_MSG_DELSETELEM, 0); te->set->ops->get(te->set, &te->elem); te->set->ops->remove(te->set, &te->elem); nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); if (te->elem.flags & NFT_SET_MAP) { nft_data_uninit(&te->elem.data, te->set->dtype); } nft_trans_destroy(trans); break; } } synchronize_rcu(); list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); nf_tables_commit_release(trans); } nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); return 0; } static void nf_tables_abort_release(struct nft_trans *trans) { switch (trans->msg_type) { case NFT_MSG_NEWTABLE: nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_NEWCHAIN: nf_tables_chain_destroy(trans->ctx.chain); break; case NFT_MSG_NEWRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_NEWSET: nft_set_destroy(nft_trans_set(trans)); break; } kfree(trans); } static int nf_tables_abort(struct sk_buff *skb) { struct net *net = sock_net(skb->sk); struct nft_trans *trans, *next; struct nft_set *set; list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (nft_trans_table_enable(trans)) { nf_tables_table_disable(trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } 
nft_trans_destroy(trans); } else { list_del_rcu(&trans->ctx.table->list); } break; case NFT_MSG_DELTABLE: list_add_tail_rcu(&trans->ctx.table->list, &trans->ctx.afi->tables); nft_trans_destroy(trans); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { free_percpu(nft_trans_chain_stats(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); nf_tables_unregister_hooks(trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: trans->ctx.table->use++; list_add_tail_rcu(&trans->ctx.chain->list, &trans->ctx.table->chains); nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: trans->ctx.chain->use--; list_del_rcu(&nft_trans_rule(trans)->list); break; case NFT_MSG_DELRULE: trans->ctx.chain->use++; nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: trans->ctx.table->use--; list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: trans->ctx.table->use++; list_add_tail_rcu(&nft_trans_set(trans)->list, &trans->ctx.table->sets); nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: nft_trans_elem_set(trans)->nelems--; set = nft_trans_elem_set(trans); set->ops->get(set, &nft_trans_elem(trans)); set->ops->remove(set, &nft_trans_elem(trans)); nft_trans_destroy(trans); break; case NFT_MSG_DELSETELEM: nft_trans_elem_set(trans)->nelems++; nft_trans_destroy(trans); break; } } synchronize_rcu(); list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { list_del(&trans->list); nf_tables_abort_release(trans); } return 0; } static const struct nfnetlink_subsystem nf_tables_subsys = { .name = "nf_tables", .subsys_id = NFNL_SUBSYS_NFTABLES, .cb_count = NFT_MSG_MAX, .cb = nf_tables_cb, .commit = nf_tables_commit, .abort = nf_tables_abort, }; int nft_chain_validate_dependency(const struct nft_chain *chain, enum nft_chain_type type) { const struct nft_base_chain *basechain; if (chain->flags & 
NFT_BASE_CHAIN) { basechain = nft_base_chain(chain); if (basechain->type->type != type) return -EOPNOTSUPP; } return 0; } EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all * reachable chains have been traversed. * * The loop check is performed whenever a new jump verdict is added to an * expression or verdict map or a verdict map is bound to a new chain. */ static int nf_tables_check_loops(const struct nft_ctx *ctx, const struct nft_chain *chain); static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_iter *iter, const struct nft_set_elem *elem) { if (elem->flags & NFT_SET_ELEM_INTERVAL_END) return 0; switch (elem->data.verdict) { case NFT_JUMP: case NFT_GOTO: return nf_tables_check_loops(ctx, elem->data.chain); default: return 0; } } static int nf_tables_check_loops(const struct nft_ctx *ctx, const struct nft_chain *chain) { const struct nft_rule *rule; const struct nft_expr *expr, *last; const struct nft_set *set; struct nft_set_binding *binding; struct nft_set_iter iter; if (ctx->chain == chain) return -ELOOP; list_for_each_entry(rule, &chain->rules, list) { nft_rule_for_each_expr(expr, last, rule) { const struct nft_data *data = NULL; int err; if (!expr->ops->validate) continue; err = expr->ops->validate(ctx, expr, &data); if (err < 0) return err; if (data == NULL) continue; switch (data->verdict) { case NFT_JUMP: case NFT_GOTO: err = nf_tables_check_loops(ctx, data->chain); if (err < 0) return err; default: break; } } } list_for_each_entry(set, &ctx->table->sets, list) { if (!(set->flags & NFT_SET_MAP) || set->dtype != NFT_DATA_VERDICT) continue; list_for_each_entry(binding, &set->bindings, list) { if (binding->chain != chain) continue; iter.skip = 0; iter.count = 0; iter.err = 0; iter.fn = nf_tables_loop_check_setelem; set->ops->walk(ctx, set, 
&iter); if (iter.err < 0) return iter.err; } } return 0; } /** * nft_validate_input_register - validate an expressions' input register * * @reg: the register number * * Validate that the input register is one of the general purpose * registers. */ int nft_validate_input_register(enum nft_registers reg) { if (reg <= NFT_REG_VERDICT) return -EINVAL; if (reg > NFT_REG_MAX) return -ERANGE; return 0; } EXPORT_SYMBOL_GPL(nft_validate_input_register); /** * nft_validate_output_register - validate an expressions' output register * * @reg: the register number * * Validate that the output register is one of the general purpose * registers or the verdict register. */ int nft_validate_output_register(enum nft_registers reg) { if (reg < NFT_REG_VERDICT) return -EINVAL; if (reg > NFT_REG_MAX) return -ERANGE; return 0; } EXPORT_SYMBOL_GPL(nft_validate_output_register); /** * nft_validate_data_load - validate an expressions' data load * * @ctx: context of the expression performing the load * @reg: the destination register number * @data: the data to load * @type: the data type * * Validate that a data load uses the appropriate data type for * the destination register. A value of NULL for the data means * that its runtime gathered data, which is always of type * NFT_DATA_VALUE. 
*/
int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
			   const struct nft_data *data,
			   enum nft_data_types type)
{
	int err;

	switch (reg) {
	case NFT_REG_VERDICT:
		/* Only verdicts may be loaded into the verdict register, and
		 * a runtime-gathered value (data == NULL) never qualifies.
		 */
		if (data == NULL || type != NFT_DATA_VERDICT)
			return -EINVAL;

		if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
			/* Reject ruleset cycles introduced by this jump. */
			err = nf_tables_check_loops(ctx, data->chain);
			if (err < 0)
				return err;

			/* Track jump-stack depth; refuse jumps that would
			 * exceed NFT_JUMP_STACK_SIZE at runtime.
			 */
			if (ctx->chain->level + 1 > data->chain->level) {
				if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
					return -EMLINK;
				data->chain->level = ctx->chain->level + 1;
			}
		}

		return 0;
	default:
		/* General purpose registers only hold plain values. */
		if (data != NULL && type != NFT_DATA_VALUE)
			return -EINVAL;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(nft_validate_data_load);

static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
	[NFTA_VERDICT_CODE]	= { .type = NLA_U32 },
	[NFTA_VERDICT_CHAIN]	= { .type = NLA_STRING,
				    .len = NFT_CHAIN_MAXNAMELEN - 1 },
};

/*
 * Parse a nested NFTA_DATA_VERDICT attribute into *data and fill in the
 * data description.  For NFT_JUMP/NFT_GOTO the target chain is looked up
 * and its use count is bumped (released again via nft_verdict_uninit).
 */
static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
			    struct nft_data_desc *desc, const struct nlattr *nla)
{
	struct nlattr *tb[NFTA_VERDICT_MAX + 1];
	struct nft_chain *chain;
	int err;

	err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
	if (err < 0)
		return err;

	if (!tb[NFTA_VERDICT_CODE])
		return -EINVAL;
	data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));

	switch (data->verdict) {
	default:
		/* Unknown nft verdicts: accept only the plain netfilter
		 * verdicts NF_ACCEPT/NF_DROP/NF_QUEUE (masked).
		 */
		switch (data->verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
		case NF_DROP:
		case NF_QUEUE:
			break;
		default:
			return -EINVAL;
		}
		/* fall through */
	case NFT_CONTINUE:
	case NFT_BREAK:
	case NFT_RETURN:
		desc->len = sizeof(data->verdict);
		break;
	case NFT_JUMP:
	case NFT_GOTO:
		if (!tb[NFTA_VERDICT_CHAIN])
			return -EINVAL;
		chain = nf_tables_chain_lookup(ctx->table,
					       tb[NFTA_VERDICT_CHAIN]);
		if (IS_ERR(chain))
			return PTR_ERR(chain);
		/* Jumping to a base chain is not allowed. */
		if (chain->flags & NFT_BASE_CHAIN)
			return -EOPNOTSUPP;

		chain->use++;
		data->chain = chain;
		/* NOTE(review): sizeof(data) is the size of the *pointer*,
		 * not of the verdict; the other branch uses
		 * sizeof(data->verdict).  Looks inconsistent — confirm the
		 * intended length against consumers of desc->len.
		 */
		desc->len = sizeof(data);
		break;
	}

	desc->type = NFT_DATA_VERDICT;
	return 0;
}

/* Release references held by a verdict: drop the jump target's use count. */
static void nft_verdict_uninit(const struct nft_data *data)
{
	switch (data->verdict) {
case NFT_JUMP: case NFT_GOTO: data->chain->use--; break; } } static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data) { struct nlattr *nest; nest = nla_nest_start(skb, NFTA_DATA_VERDICT); if (!nest) goto nla_put_failure; if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict))) goto nla_put_failure; switch (data->verdict) { case NFT_JUMP: case NFT_GOTO: if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name)) goto nla_put_failure; } nla_nest_end(skb, nest); return 0; nla_put_failure: return -1; } static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { unsigned int len; len = nla_len(nla); if (len == 0) return -EINVAL; if (len > sizeof(data->data)) return -EOVERFLOW; nla_memcpy(data->data, nla, sizeof(data->data)); desc->type = NFT_DATA_VALUE; desc->len = len; return 0; } static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data, unsigned int len) { return nla_put(skb, NFTA_DATA_VALUE, len, data->data); } static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = { [NFTA_DATA_VALUE] = { .type = NLA_BINARY, .len = FIELD_SIZEOF(struct nft_data, data) }, [NFTA_DATA_VERDICT] = { .type = NLA_NESTED }, }; /** * nft_data_init - parse nf_tables data netlink attributes * * @ctx: context of the expression using the data * @data: destination struct nft_data * @desc: data description * @nla: netlink attribute containing data * * Parse the netlink data attributes and initialize a struct nft_data. * The type and length of data are returned in the data description. * * The caller can indicate that it only wants to accept data of type * NFT_DATA_VALUE by passing NULL for the ctx argument. 
*/ int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { struct nlattr *tb[NFTA_DATA_MAX + 1]; int err; err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy); if (err < 0) return err; if (tb[NFTA_DATA_VALUE]) return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]); if (tb[NFTA_DATA_VERDICT] && ctx != NULL) return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]); return -EINVAL; } EXPORT_SYMBOL_GPL(nft_data_init); /** * nft_data_uninit - release a nft_data item * * @data: struct nft_data to release * @type: type of data * * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, * all others need to be released by calling this function. */ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) { switch (type) { case NFT_DATA_VALUE: return; case NFT_DATA_VERDICT: return nft_verdict_uninit(data); default: WARN_ON(1); } } EXPORT_SYMBOL_GPL(nft_data_uninit); int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, enum nft_data_types type, unsigned int len) { struct nlattr *nest; int err; nest = nla_nest_start(skb, attr); if (nest == NULL) return -1; switch (type) { case NFT_DATA_VALUE: err = nft_value_dump(skb, data, len); break; case NFT_DATA_VERDICT: err = nft_verdict_dump(skb, data); break; default: err = -EINVAL; WARN_ON(1); } nla_nest_end(skb, nest); return err; } EXPORT_SYMBOL_GPL(nft_data_dump); static int nf_tables_init_net(struct net *net) { INIT_LIST_HEAD(&net->nft.af_info); INIT_LIST_HEAD(&net->nft.commit_list); net->nft.base_seq = 1; return 0; } static struct pernet_operations nf_tables_net_ops = { .init = nf_tables_init_net, }; static int __init nf_tables_module_init(void) { int err; info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS, GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto err1; } err = nf_tables_core_module_init(); if (err < 0) goto err2; err = 
nfnetlink_subsys_register(&nf_tables_subsys); if (err < 0) goto err3; pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n"); return register_pernet_subsys(&nf_tables_net_ops); err3: nf_tables_core_module_exit(); err2: kfree(info); err1: return err; } static void __exit nf_tables_module_exit(void) { unregister_pernet_subsys(&nf_tables_net_ops); nfnetlink_subsys_unregister(&nf_tables_subsys); rcu_barrier(); nf_tables_core_module_exit(); kfree(info); } module_init(nf_tables_module_init); module_exit(nf_tables_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
./CrossVul/dataset_final_sorted/CWE-19/c/good_1487_0
crossvul-cpp_data_good_1453_0
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_attr_sf.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_bmap_btree.h" #include "xfs_attr.h" #include "xfs_attr_leaf.h" #include "xfs_attr_remote.h" #include "xfs_error.h" #include "xfs_quota.h" #include "xfs_trans_space.h" #include "xfs_trace.h" #include "xfs_dinode.h" /* * xfs_attr.c * * Provide the external interfaces to manage attribute lists. */ /*======================================================================== * Function prototypes for the kernel. *========================================================================*/ /* * Internal routines when attribute list fits inside the inode. */ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args); /* * Internal routines when attribute list is one block. 
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);

/*
 * Internal routines when attribute list is more than one block.
 */
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);


/*
 * Validate a caller-supplied attribute name and wrap it in a struct
 * xfs_name.  Note: returns *positive* errnos (EINVAL/EFAULT), following
 * the internal XFS convention used throughout this file.
 */
STATIC int
xfs_attr_name_to_xname(
	struct xfs_name	*xname,
	const unsigned char *aname)
{
	if (!aname)
		return EINVAL;
	xname->name = aname;
	xname->len = strlen((char *)aname);
	if (xname->len >= MAXNAMELEN)
		return EFAULT;		/* match IRIX behaviour */

	return 0;
}

/*
 * Return non-zero if the inode has any extended attributes: either a
 * non-extents attr fork format or at least one attr extent.
 */
int
xfs_inode_hasattr(
	struct xfs_inode	*ip)
{
	if (!XFS_IFORK_Q(ip) ||
	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
	     ip->i_d.di_anextents == 0))
		return 0;
	return 1;
}

/*========================================================================
 * Overall external interface routines.
 *========================================================================*/

/*
 * Look up one attribute value.  On entry *valuelenp is the size of the
 * caller's buffer; on return it holds the value's length.  Dispatches to
 * the shortform/leaf/node implementation based on the attr fork format.
 * Caller must hold the appropriate inode lock (see xfs_attr_get).
 */
STATIC int
xfs_attr_get_int(
	struct xfs_inode	*ip,
	struct xfs_name		*name,
	unsigned char		*value,
	int			*valuelenp,
	int			flags)
{
	xfs_da_args_t   args;
	int             error;

	if (!xfs_inode_hasattr(ip))
		return ENOATTR;

	/*
	 * Fill in the arg structure for this request.
	 */
	memset((char *)&args, 0, sizeof(args));
	args.name = name->name;
	args.namelen = name->len;
	args.value = value;
	args.valuelen = *valuelenp;
	args.flags = flags;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.dp = ip;
	args.whichfork = XFS_ATTR_FORK;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_getvalue(&args);
	} else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_get(&args);
	} else {
		error = xfs_attr_node_get(&args);
	}

	/*
	 * Return the number of bytes in the value to the caller.
*/ *valuelenp = args.valuelen; if (error == EEXIST) error = 0; return(error); } int xfs_attr_get( xfs_inode_t *ip, const unsigned char *name, unsigned char *value, int *valuelenp, int flags) { int error; struct xfs_name xname; uint lock_mode; XFS_STATS_INC(xs_attr_get); if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return(EIO); error = xfs_attr_name_to_xname(&xname, name); if (error) return error; lock_mode = xfs_ilock_attr_map_shared(ip); error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags); xfs_iunlock(ip, lock_mode); return(error); } /* * Calculate how many blocks we need for the new attribute, */ STATIC int xfs_attr_calc_size( struct xfs_inode *ip, int namelen, int valuelen, int *local) { struct xfs_mount *mp = ip->i_mount; int size; int nblks; /* * Determine space new attribute will use, and if it would be * "local" or "remote" (note: local != inline). */ size = xfs_attr_leaf_newentsize(namelen, valuelen, mp->m_sb.sb_blocksize, local); nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); if (*local) { if (size > (mp->m_sb.sb_blocksize >> 1)) { /* Double split possible */ nblks *= 2; } } else { /* * Out of line attribute, cannot double split, but * make room for the attribute value itself. */ uint dblocks = xfs_attr3_rmt_blocks(mp, valuelen); nblks += dblocks; nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); } return nblks; } STATIC int xfs_attr_set_int( struct xfs_inode *dp, struct xfs_name *name, unsigned char *value, int valuelen, int flags) { xfs_da_args_t args; xfs_fsblock_t firstblock; xfs_bmap_free_t flist; int error, err2, committed; struct xfs_mount *mp = dp->i_mount; struct xfs_trans_res tres; int rsvd = (flags & ATTR_ROOT) != 0; int local; /* * Attach the dquots to the inode. */ error = xfs_qm_dqattach(dp, 0); if (error) return error; /* * If the inode doesn't have an attribute fork, add one. 
* (inode must not be locked when we call this routine) */ if (XFS_IFORK_Q(dp) == 0) { int sf_size = sizeof(xfs_attr_sf_hdr_t) + XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen); if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd))) return(error); } /* * Fill in the arg structure for this request. */ memset((char *)&args, 0, sizeof(args)); args.name = name->name; args.namelen = name->len; args.value = value; args.valuelen = valuelen; args.flags = flags; args.hashval = xfs_da_hashname(args.name, args.namelen); args.dp = dp; args.firstblock = &firstblock; args.flist = &flist; args.whichfork = XFS_ATTR_FORK; args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT; /* Size is now blocks for attribute data */ args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local); /* * Start our first transaction of the day. * * All future transactions during this code must be "chained" off * this one via the trans_dup() call. All transactions will contain * the inode, and the inode will always be marked with trans_ihold(). * Since the inode will be locked in all transactions, we must log * the inode in every transaction to let it float upward through * the log. */ args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET); /* * Root fork attributes can use reserved data blocks for this * operation if necessary */ if (rsvd) args.trans->t_flags |= XFS_TRANS_RESERVE; tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres + M_RES(mp)->tr_attrsetrt.tr_logres * args.total; tres.tr_logcount = XFS_ATTRSET_LOG_COUNT; tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; error = xfs_trans_reserve(args.trans, &tres, args.total, 0); if (error) { xfs_trans_cancel(args.trans, 0); return(error); } xfs_ilock(dp, XFS_ILOCK_EXCL); error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0, rsvd ? 
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES : XFS_QMOPT_RES_REGBLKS); if (error) { xfs_iunlock(dp, XFS_ILOCK_EXCL); xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES); return (error); } xfs_trans_ijoin(args.trans, dp, 0); /* * If the attribute list is non-existent or a shortform list, * upgrade it to a single-leaf-block attribute list. */ if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) && (dp->i_d.di_anextents == 0))) { /* * Build initial attribute list (if required). */ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) xfs_attr_shortform_create(&args); /* * Try to add the attr to the attribute list in * the inode. */ error = xfs_attr_shortform_addname(&args); if (error != ENOSPC) { /* * Commit the shortform mods, and we're done. * NOTE: this is also the error path (EEXIST, etc). */ ASSERT(args.trans != NULL); /* * If this is a synchronous mount, make sure that * the transaction goes to disk before returning * to the user. */ if (mp->m_flags & XFS_MOUNT_WSYNC) { xfs_trans_set_sync(args.trans); } if (!error && (flags & ATTR_KERNOTIME) == 0) { xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); } err2 = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); xfs_iunlock(dp, XFS_ILOCK_EXCL); return(error == 0 ? err2 : error); } /* * It won't fit in the shortform, transform to a leaf block. * GROT: another possible req'mt for a double-split btree op. */ xfs_bmap_init(args.flist, args.firstblock); error = xfs_attr_shortform_to_leaf(&args); if (!error) { error = xfs_bmap_finish(&args.trans, args.flist, &committed); } if (error) { ASSERT(committed); args.trans = NULL; xfs_bmap_cancel(&flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args.trans, dp, 0); /* * Commit the leaf transformation. We'll need another (linked) * transaction to add the new attribute to the leaf. 
*/ error = xfs_trans_roll(&args.trans, dp); if (error) goto out; } if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { error = xfs_attr_leaf_addname(&args); } else { error = xfs_attr_node_addname(&args); } if (error) { goto out; } /* * If this is a synchronous mount, make sure that the * transaction goes to disk before returning to the user. */ if (mp->m_flags & XFS_MOUNT_WSYNC) { xfs_trans_set_sync(args.trans); } if ((flags & ATTR_KERNOTIME) == 0) xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); /* * Commit the last in the sequence of transactions. */ xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE); error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); xfs_iunlock(dp, XFS_ILOCK_EXCL); return(error); out: if (args.trans) xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); xfs_iunlock(dp, XFS_ILOCK_EXCL); return(error); } int xfs_attr_set( xfs_inode_t *dp, const unsigned char *name, unsigned char *value, int valuelen, int flags) { int error; struct xfs_name xname; XFS_STATS_INC(xs_attr_set); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return (EIO); error = xfs_attr_name_to_xname(&xname, name); if (error) return error; return xfs_attr_set_int(dp, &xname, value, valuelen, flags); } /* * Generic handler routine to remove a name from an attribute list. * Transitions attribute list from Btree to shortform as necessary. */ STATIC int xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) { xfs_da_args_t args; xfs_fsblock_t firstblock; xfs_bmap_free_t flist; int error; xfs_mount_t *mp = dp->i_mount; /* * Fill in the arg structure for this request. 
*/ memset((char *)&args, 0, sizeof(args)); args.name = name->name; args.namelen = name->len; args.flags = flags; args.hashval = xfs_da_hashname(args.name, args.namelen); args.dp = dp; args.firstblock = &firstblock; args.flist = &flist; args.total = 0; args.whichfork = XFS_ATTR_FORK; /* * we have no control over the attribute names that userspace passes us * to remove, so we have to allow the name lookup prior to attribute * removal to fail. */ args.op_flags = XFS_DA_OP_OKNOENT; /* * Attach the dquots to the inode. */ error = xfs_qm_dqattach(dp, 0); if (error) return error; /* * Start our first transaction of the day. * * All future transactions during this code must be "chained" off * this one via the trans_dup() call. All transactions will contain * the inode, and the inode will always be marked with trans_ihold(). * Since the inode will be locked in all transactions, we must log * the inode in every transaction to let it float upward through * the log. */ args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM); /* * Root fork attributes can use reserved data blocks for this * operation if necessary */ if (flags & ATTR_ROOT) args.trans->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm, XFS_ATTRRM_SPACE_RES(mp), 0); if (error) { xfs_trans_cancel(args.trans, 0); return(error); } xfs_ilock(dp, XFS_ILOCK_EXCL); /* * No need to make quota reservations here. We expect to release some * blocks not allocate in the common case. */ xfs_trans_ijoin(args.trans, dp, 0); /* * Decide on what work routines to call based on the inode size. 
*/ if (!xfs_inode_hasattr(dp)) { error = XFS_ERROR(ENOATTR); goto out; } if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); error = xfs_attr_shortform_remove(&args); if (error) { goto out; } } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { error = xfs_attr_leaf_removename(&args); } else { error = xfs_attr_node_removename(&args); } if (error) { goto out; } /* * If this is a synchronous mount, make sure that the * transaction goes to disk before returning to the user. */ if (mp->m_flags & XFS_MOUNT_WSYNC) { xfs_trans_set_sync(args.trans); } if ((flags & ATTR_KERNOTIME) == 0) xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG); /* * Commit the last in the sequence of transactions. */ xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE); error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES); xfs_iunlock(dp, XFS_ILOCK_EXCL); return(error); out: if (args.trans) xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); xfs_iunlock(dp, XFS_ILOCK_EXCL); return(error); } int xfs_attr_remove( xfs_inode_t *dp, const unsigned char *name, int flags) { int error; struct xfs_name xname; XFS_STATS_INC(xs_attr_remove); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return (EIO); error = xfs_attr_name_to_xname(&xname, name); if (error) return error; xfs_ilock(dp, XFS_ILOCK_SHARED); if (!xfs_inode_hasattr(dp)) { xfs_iunlock(dp, XFS_ILOCK_SHARED); return XFS_ERROR(ENOATTR); } xfs_iunlock(dp, XFS_ILOCK_SHARED); return xfs_attr_remove_int(dp, &xname, flags); } /*======================================================================== * External routines when attribute list is inside the inode *========================================================================*/ /* * Add a name to the shortform attribute list structure * This is the external routine. 
*/ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args) { int newsize, forkoff, retval; trace_xfs_attr_sf_addname(args); retval = xfs_attr_shortform_lookup(args); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { return(retval); } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) return(retval); retval = xfs_attr_shortform_remove(args); ASSERT(retval == 0); } if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX) return(XFS_ERROR(ENOSPC)); newsize = XFS_ATTR_SF_TOTSIZE(args->dp); newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize); if (!forkoff) return(XFS_ERROR(ENOSPC)); xfs_attr_shortform_add(args, forkoff); return(0); } /*======================================================================== * External routines when attribute list is one block *========================================================================*/ /* * Add a name to the leaf attribute list structure * * This leaf block cannot have a "remote" value, we only call this routine * if bmap_one_block() says there is only one block (ie: no remote blks). */ STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args) { xfs_inode_t *dp; struct xfs_buf *bp; int retval, error, committed, forkoff; trace_xfs_attr_leaf_addname(args); /* * Read the (only) block in the attribute list in. */ dp = args->dp; args->blkno = 0; error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); if (error) return error; /* * Look up the given attribute in the leaf block. Figure out if * the given flags produce an error or call for an atomic rename. 
*/ retval = xfs_attr3_leaf_lookup_int(bp, args); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { xfs_trans_brelse(args->trans, bp); return retval; } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) { /* pure create op */ xfs_trans_brelse(args->trans, bp); return retval; } trace_xfs_attr_leaf_replace(args); /* save the attribute state for later removal*/ args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */ args->blkno2 = args->blkno; /* set 2nd entry info*/ args->index2 = args->index; args->rmtblkno2 = args->rmtblkno; args->rmtblkcnt2 = args->rmtblkcnt; args->rmtvaluelen2 = args->rmtvaluelen; /* * clear the remote attr state now that it is saved so that the * values reflect the state of the attribute we are about to * add, not the attribute we just found and will remove later. */ args->rmtblkno = 0; args->rmtblkcnt = 0; args->rmtvaluelen = 0; } /* * Add the attribute to the leaf block, transitioning to a Btree * if required. */ retval = xfs_attr3_leaf_add(bp, args); if (retval == ENOSPC) { /* * Promote the attribute list to the Btree format, then * Commit that transaction so that the node_addname() call * can manage its own transactions. */ xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return(error); } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the current trans (including the inode) and start * a new one. */ error = xfs_trans_roll(&args->trans, dp); if (error) return (error); /* * Fob the whole rest of the problem off on the Btree code. 
*/ error = xfs_attr_node_addname(args); return(error); } /* * Commit the transaction that added the attr name so that * later routines can manage their own transactions. */ error = xfs_trans_roll(&args->trans, dp); if (error) return (error); /* * If there was an out-of-line value, allocate the blocks we * identified for its storage and copy the value. This is done * after we create the attribute so that we don't overflow the * maximum size of a transaction and/or hit a deadlock. */ if (args->rmtblkno > 0) { error = xfs_attr_rmtval_set(args); if (error) return(error); } /* * If this is an atomic rename operation, we must "flip" the * incomplete flags on the "new" and "old" attribute/value pairs * so that one disappears and one appears atomically. Then we * must remove the "old" attribute/value pair. */ if (args->op_flags & XFS_DA_OP_RENAME) { /* * In a separate transaction, set the incomplete flag on the * "old" attr and clear the incomplete flag on the "new" attr. */ error = xfs_attr3_leaf_flipflags(args); if (error) return(error); /* * Dismantle the "old" attribute/value pair by removing * a "remote" value (if it exists). */ args->index = args->index2; args->blkno = args->blkno2; args->rmtblkno = args->rmtblkno2; args->rmtblkcnt = args->rmtblkcnt2; args->rmtvaluelen = args->rmtvaluelen2; if (args->rmtblkno) { error = xfs_attr_rmtval_remove(args); if (error) return(error); } /* * Read in the block containing the "old" attr, then * remove the "old" attr from that block (neat, huh!) */ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); if (error) return error; xfs_attr3_leaf_remove(bp, args); /* * If the result is small enough, shrink it all into the inode. 
*/ if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return(error); } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } /* * Commit the remove and start the next trans in series. */ error = xfs_trans_roll(&args->trans, dp); } else if (args->rmtblkno > 0) { /* * Added a "remote" value, just clear the incomplete flag. */ error = xfs_attr3_leaf_clearflag(args); } return error; } /* * Remove a name from the leaf attribute list structure * * This leaf block cannot have a "remote" value, we only call this routine * if bmap_one_block() says there is only one block (ie: no remote blks). */ STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args) { xfs_inode_t *dp; struct xfs_buf *bp; int error, committed, forkoff; trace_xfs_attr_leaf_removename(args); /* * Remove the attribute. */ dp = args->dp; args->blkno = 0; error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); if (error) return error; error = xfs_attr3_leaf_lookup_int(bp, args); if (error == ENOATTR) { xfs_trans_brelse(args->trans, bp); return error; } xfs_attr3_leaf_remove(bp, args); /* * If the result is small enough, shrink it all into the inode. 
*/ if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return error; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } return 0; } /* * Look up a name in a leaf attribute list structure. * * This leaf block cannot have a "remote" value, we only call this routine * if bmap_one_block() says there is only one block (ie: no remote blks). */ STATIC int xfs_attr_leaf_get(xfs_da_args_t *args) { struct xfs_buf *bp; int error; trace_xfs_attr_leaf_get(args); args->blkno = 0; error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); if (error) return error; error = xfs_attr3_leaf_lookup_int(bp, args); if (error != EEXIST) { xfs_trans_brelse(args->trans, bp); return error; } error = xfs_attr3_leaf_getvalue(bp, args); xfs_trans_brelse(args->trans, bp); if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) { error = xfs_attr_rmtval_get(args); } return error; } /*======================================================================== * External routines when attribute list size > XFS_LBSIZE(mp). *========================================================================*/ /* * Add a name to a Btree-format attribute list. * * This will involve walking down the Btree, and may involve splitting * leaf nodes and even splitting intermediate nodes up to and including * the root node (a special case of an intermediate node). * * "Remote" attribute values confuse the issue and atomic rename operations * add a whole extra layer of confusion on top of that. 
*/ STATIC int xfs_attr_node_addname(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; xfs_inode_t *dp; xfs_mount_t *mp; int committed, retval, error; trace_xfs_attr_node_addname(args); /* * Fill in bucket of arguments/results/context to carry around. */ dp = args->dp; mp = dp->i_mount; restart: state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name already exists, and get back a pointer * to where it should go. */ error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { goto out; } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) goto out; trace_xfs_attr_node_replace(args); /* save the attribute state for later removal*/ args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ args->blkno2 = args->blkno; /* set 2nd entry info*/ args->index2 = args->index; args->rmtblkno2 = args->rmtblkno; args->rmtblkcnt2 = args->rmtblkcnt; args->rmtvaluelen2 = args->rmtvaluelen; /* * clear the remote attr state now that it is saved so that the * values reflect the state of the attribute we are about to * add, not the attribute we just found and will remove later. */ args->rmtblkno = 0; args->rmtblkcnt = 0; args->rmtvaluelen = 0; } retval = xfs_attr3_leaf_add(blk->bp, state->args); if (retval == ENOSPC) { if (state->path.active == 1) { /* * Its really a single leaf node, but it had * out-of-line values so it looked like it *might* * have been a b-tree. 
*/ xfs_da_state_free(state); state = NULL; xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the node conversion and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; goto restart; } /* * Split as many Btree elements as required. * This code tracks the new and old attr's location * in the index/blkno/rmtblkno/rmtblkcnt fields and * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. */ xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_split(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } else { /* * Addition succeeded, update Btree hashvals. */ xfs_da3_fixhashpath(state, &state->path); } /* * Kill the state structure, we're done with it and need to * allow the buffers to come back later. */ xfs_da_state_free(state); state = NULL; /* * Commit the leaf addition or btree split and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; /* * If there was an out-of-line value, allocate the blocks we * identified for its storage and copy the value. This is done * after we create the attribute so that we don't overflow the * maximum size of a transaction and/or hit a deadlock. 
*/ if (args->rmtblkno > 0) { error = xfs_attr_rmtval_set(args); if (error) return(error); } /* * If this is an atomic rename operation, we must "flip" the * incomplete flags on the "new" and "old" attribute/value pairs * so that one disappears and one appears atomically. Then we * must remove the "old" attribute/value pair. */ if (args->op_flags & XFS_DA_OP_RENAME) { /* * In a separate transaction, set the incomplete flag on the * "old" attr and clear the incomplete flag on the "new" attr. */ error = xfs_attr3_leaf_flipflags(args); if (error) goto out; /* * Dismantle the "old" attribute/value pair by removing * a "remote" value (if it exists). */ args->index = args->index2; args->blkno = args->blkno2; args->rmtblkno = args->rmtblkno2; args->rmtblkcnt = args->rmtblkcnt2; args->rmtvaluelen = args->rmtvaluelen2; if (args->rmtblkno) { error = xfs_attr_rmtval_remove(args); if (error) return(error); } /* * Re-find the "old" attribute entry after any split ops. * The INCOMPLETE flag means that we will find the "old" * attr, not the "new" one. */ args->flags |= XFS_ATTR_INCOMPLETE; state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; state->inleaf = 0; error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; /* * Remove the name and update the hashvals in the tree. */ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); error = xfs_attr3_leaf_remove(blk->bp, args); xfs_da3_fixhashpath(state, &state->path); /* * Check to see if the tree needs to be collapsed. 
*/ if (retval && (state->path.active > 1)) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } /* * Commit and start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; } else if (args->rmtblkno > 0) { /* * Added a "remote" value, just clear the incomplete flag. */ error = xfs_attr3_leaf_clearflag(args); if (error) goto out; } retval = error = 0; out: if (state) xfs_da_state_free(state); if (error) return(error); return(retval); } /* * Remove a name from a B-tree attribute list. * * This will involve walking down the Btree, and may involve joining * leaf nodes and even joining intermediate nodes up to and including * the root node (a special case of an intermediate node). */ STATIC int xfs_attr_node_removename(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; xfs_inode_t *dp; struct xfs_buf *bp; int retval, error, committed, forkoff; trace_xfs_attr_node_removename(args); /* * Tie a string around our finger to remind us where we are. */ dp = args->dp; state = xfs_da_state_alloc(); state->args = args; state->mp = dp->i_mount; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name exists, and get back a pointer to it. */ error = xfs_da3_node_lookup_int(state, &retval); if (error || (retval != EEXIST)) { if (error == 0) error = retval; goto out; } /* * If there is an out-of-line value, de-allocate the blocks. * This is done before we remove the attribute so that we don't * overflow the maximum size of a transaction and/or hit a deadlock. 
*/ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->bp != NULL); ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if (args->rmtblkno > 0) { /* * Fill in disk block numbers in the state structure * so that we can get the buffers back after we commit * several transactions in the following calls. */ error = xfs_attr_fillstate(state); if (error) goto out; /* * Mark the attribute as INCOMPLETE, then bunmapi() the * remote value. */ error = xfs_attr3_leaf_setflag(args); if (error) goto out; error = xfs_attr_rmtval_remove(args); if (error) goto out; /* * Refill the state structure with buffers, the prior calls * released our buffers. */ error = xfs_attr_refillstate(state); if (error) goto out; } /* * Remove the name and update the hashvals in the tree. */ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); retval = xfs_attr3_leaf_remove(blk->bp, args); xfs_da3_fixhashpath(state, &state->path); /* * Check to see if the tree needs to be collapsed. */ if (retval && (state->path.active > 1)) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the Btree join operation and start a new trans. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; } /* * If the result is small enough, push it all into the inode. */ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { /* * Have to get rid of the copy of this dabuf in the state. 
*/ ASSERT(state->path.active == 1); ASSERT(state->path.blk[0].bp); state->path.blk[0].bp = NULL; error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp); if (error) goto out; if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } else xfs_trans_brelse(args->trans, bp); } error = 0; out: xfs_da_state_free(state); return(error); } /* * Fill in the disk block numbers in the state structure for the buffers * that are attached to the state structure. * This is done so that we can quickly reattach ourselves to those buffers * after some set of transaction commits have released these buffers. */ STATIC int xfs_attr_fillstate(xfs_da_state_t *state) { xfs_da_state_path_t *path; xfs_da_state_blk_t *blk; int level; trace_xfs_attr_fillstate(state->args); /* * Roll down the "path" in the state structure, storing the on-disk * block number for those buffers in the "path". */ path = &state->path; ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); for (blk = path->blk, level = 0; level < path->active; blk++, level++) { if (blk->bp) { blk->disk_blkno = XFS_BUF_ADDR(blk->bp); blk->bp = NULL; } else { blk->disk_blkno = 0; } } /* * Roll down the "altpath" in the state structure, storing the on-disk * block number for those buffers in the "altpath". 
*/ path = &state->altpath; ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); for (blk = path->blk, level = 0; level < path->active; blk++, level++) { if (blk->bp) { blk->disk_blkno = XFS_BUF_ADDR(blk->bp); blk->bp = NULL; } else { blk->disk_blkno = 0; } } return(0); } /* * Reattach the buffers to the state structure based on the disk block * numbers stored in the state structure. * This is done after some set of transaction commits have released those * buffers from our grip. */ STATIC int xfs_attr_refillstate(xfs_da_state_t *state) { xfs_da_state_path_t *path; xfs_da_state_blk_t *blk; int level, error; trace_xfs_attr_refillstate(state->args); /* * Roll down the "path" in the state structure, storing the on-disk * block number for those buffers in the "path". */ path = &state->path; ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); for (blk = path->blk, level = 0; level < path->active; blk++, level++) { if (blk->disk_blkno) { error = xfs_da3_node_read(state->args->trans, state->args->dp, blk->blkno, blk->disk_blkno, &blk->bp, XFS_ATTR_FORK); if (error) return(error); } else { blk->bp = NULL; } } /* * Roll down the "altpath" in the state structure, storing the on-disk * block number for those buffers in the "altpath". */ path = &state->altpath; ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH)); for (blk = path->blk, level = 0; level < path->active; blk++, level++) { if (blk->disk_blkno) { error = xfs_da3_node_read(state->args->trans, state->args->dp, blk->blkno, blk->disk_blkno, &blk->bp, XFS_ATTR_FORK); if (error) return(error); } else { blk->bp = NULL; } } return(0); } /* * Look up a filename in a node attribute list. * * This routine gets called for any attribute fork that has more than one * block, ie: both true Btree attr lists and for single-leaf-blocks with * "remote" values taking up more blocks. 
*/ STATIC int xfs_attr_node_get(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; int error, retval; int i; trace_xfs_attr_node_get(args); state = xfs_da_state_alloc(); state->args = args; state->mp = args->dp->i_mount; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name exists, and get back a pointer to it. */ error = xfs_da3_node_lookup_int(state, &retval); if (error) { retval = error; } else if (retval == EEXIST) { blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->bp != NULL); ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); /* * Get the value, local or "remote" */ retval = xfs_attr3_leaf_getvalue(blk->bp, args); if (!retval && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) { retval = xfs_attr_rmtval_get(args); } } /* * If not in a transaction, we have to release all the buffers. */ for (i = 0; i < state->path.active; i++) { xfs_trans_brelse(args->trans, state->path.blk[i].bp); state->path.blk[i].bp = NULL; } xfs_da_state_free(state); return(retval); }
./CrossVul/dataset_final_sorted/CWE-19/c/good_1453_0
crossvul-cpp_data_bad_1844_1
404: Not Found
./CrossVul/dataset_final_sorted/CWE-19/c/bad_1844_1
crossvul-cpp_data_good_2422_1
/* * Based on arch/arm/mm/fault.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 1995-2004 Russell King * Copyright (C) 2012 ARM Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/hardirq.h> #include <linux/init.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> #include <linux/sched.h> #include <linux/highmem.h> #include <linux/perf_event.h> #include <asm/exception.h> #include <asm/debug-monitors.h> #include <asm/esr.h> #include <asm/system_misc.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> static const char *fault_name(unsigned int esr); /* * Dump out the page tables associated with 'addr' in mm 'mm'. */ void show_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; if (!mm) mm = &init_mm; pr_alert("pgd = %p\n", mm->pgd); pgd = pgd_offset(mm, addr); pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd)); do { pud_t *pud; pmd_t *pmd; pte_t *pte; if (pgd_none(*pgd) || pgd_bad(*pgd)) break; pud = pud_offset(pgd, addr); if (pud_none(*pud) || pud_bad(*pud)) break; pmd = pmd_offset(pud, addr); printk(", *pmd=%016llx", pmd_val(*pmd)); if (pmd_none(*pmd) || pmd_bad(*pmd)) break; pte = pte_offset_map(pmd, addr); printk(", *pte=%016llx", pte_val(*pte)); pte_unmap(pte); } while(0); printk("\n"); } /* * The kernel tried to access some page that wasn't present. 
*/ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int esr, struct pt_regs *regs) { /* * Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; /* * No handler, we'll have to terminate things with extreme prejudice. */ bust_spinlocks(1); pr_alert("Unable to handle kernel %s at virtual address %08lx\n", (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request", addr); show_pte(mm, addr); die("Oops", regs, esr); bust_spinlocks(0); do_exit(SIGKILL); } /* * Something tried to access memory that isn't in our memory map. User mode * accesses just cause a SIGSEGV */ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, unsigned int esr, unsigned int sig, int code, struct pt_regs *regs) { struct siginfo si; if (show_unhandled_signals && unhandled_signal(tsk, sig) && printk_ratelimit()) { pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, addr, esr); show_pte(tsk->mm, addr); show_regs(regs); } tsk->thread.fault_address = addr; tsk->thread.fault_code = esr; si.si_signo = sig; si.si_errno = 0; si.si_code = code; si.si_addr = (void __user *)addr; force_sig_info(sig, &si, tsk); } static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->active_mm; /* * If we are in kernel mode at this point, we have no context to * handle this fault with. 
 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, esr, regs);
}

/*
 * Fault codes private to this file, OR-ed into the VM_FAULT_* space
 * returned by __do_page_fault(): no VMA covers the address, or a VMA
 * exists but its permissions forbid the access.
 */
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/* Set in the ESR by the entry code when the faulting access was an
 * instruction fetch. */
#define ESR_LNX_EXEC		(1 << 24)

/*
 * Resolve the VMA covering @addr and ask the core MM to service the fault.
 *
 * Returns the VM_FAULT_* result of handle_mm_fault(), or the local
 * VM_FAULT_BADMAP / VM_FAULT_BADACCESS codes when no suitable mapping
 * exists.  Caller must hold mm->mmap_sem for read (see do_page_fault()).
 */
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred. If we encountered a write or exec fault, we must have
	 * appropriate permissions, otherwise we allow any permission.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);

check_stack:
	/* Grow a downward stack VMA to cover @addr if it is allowed to. */
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

/*
 * Top-level page fault handler: classifies the fault from the ESR, takes
 * mmap_sem, runs __do_page_fault() (retrying once when the core MM asks
 * for it), performs perf/min_flt/maj_flt accounting, and finally either
 * delivers SIGSEGV/SIGBUS to user space or falls back to the kernel
 * exception-fixup path.  Always returns 0.
 */
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	/* Derive the permission the faulting access required from the ESR. */
	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contains the relevant entry, we
 * copy the it to this task. If not, we send the process a signal, fixup the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
*/ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) { return 1; } static struct fault_info { int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); int sig; int code; const char *name; } fault_info[] = { { do_bad, SIGBUS, 0, "ttbr address size fault" }, { do_bad, SIGBUS, 0, "level 1 address size fault" }, { do_bad, SIGBUS, 0, "level 2 address size fault" }, { do_bad, SIGBUS, 0, "level 3 address size fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "input address range fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, { do_bad, SIGBUS, 0, "reserved access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, { do_bad, SIGBUS, 0, "reserved permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, { do_bad, SIGBUS, 0, "asynchronous external abort" }, { do_bad, SIGBUS, 0, "unknown 18" }, { do_bad, SIGBUS, 0, "unknown 19" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, { do_bad, SIGBUS, 0, "synchronous parity error" }, { do_bad, SIGBUS, 0, "asynchronous parity error" }, { do_bad, SIGBUS, 0, "unknown 26" }, { do_bad, SIGBUS, 0, "unknown 27" }, { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, { do_bad, 
SIGBUS, 0, "synchronous parity error (translation table walk" }, { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, { do_bad, SIGBUS, 0, "unknown 32" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, { do_bad, SIGBUS, 0, "debug event" }, { do_bad, SIGBUS, 0, "unknown 35" }, { do_bad, SIGBUS, 0, "unknown 36" }, { do_bad, SIGBUS, 0, "unknown 37" }, { do_bad, SIGBUS, 0, "unknown 38" }, { do_bad, SIGBUS, 0, "unknown 39" }, { do_bad, SIGBUS, 0, "unknown 40" }, { do_bad, SIGBUS, 0, "unknown 41" }, { do_bad, SIGBUS, 0, "unknown 42" }, { do_bad, SIGBUS, 0, "unknown 43" }, { do_bad, SIGBUS, 0, "unknown 44" }, { do_bad, SIGBUS, 0, "unknown 45" }, { do_bad, SIGBUS, 0, "unknown 46" }, { do_bad, SIGBUS, 0, "unknown 47" }, { do_bad, SIGBUS, 0, "unknown 48" }, { do_bad, SIGBUS, 0, "unknown 49" }, { do_bad, SIGBUS, 0, "unknown 50" }, { do_bad, SIGBUS, 0, "unknown 51" }, { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, { do_bad, SIGBUS, 0, "unknown 53" }, { do_bad, SIGBUS, 0, "unknown 54" }, { do_bad, SIGBUS, 0, "unknown 55" }, { do_bad, SIGBUS, 0, "unknown 56" }, { do_bad, SIGBUS, 0, "unknown 57" }, { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, { do_bad, SIGBUS, 0, "unknown 59" }, { do_bad, SIGBUS, 0, "unknown 60" }, { do_bad, SIGBUS, 0, "unknown 61" }, { do_bad, SIGBUS, 0, "unknown 62" }, { do_bad, SIGBUS, 0, "unknown 63" }, }; static const char *fault_name(unsigned int esr) { const struct fault_info *inf = fault_info + (esr & 63); return inf->name; } /* * Dispatch a data abort to the relevant handler. 
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	/* Low six ESR bits select the fault class in fault_info[]. */
	const struct fault_info *inf = fault_info + (esr & 63);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	/* Deliver the table-specified signal for an unhandled abort. */
	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;

	/* SP/PC alignment faults are always SIGBUS/BUS_ADRALN. */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}

/*
 * Debug exception dispatch table, indexed by DBG_ESR_EVT(esr).  Entries
 * are replaced at boot via hook_debug_fault_code() below.
 */
static struct fault_info debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint" },
	{ do_bad,	SIGBUS,		0,		"unknown 3" },
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT" },
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch" },
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK" },
	{ do_bad,	SIGBUS,		0,		"unknown 7" },
};

/*
 * Install a handler for one of the debug exception types above.
 * Boot-time only (__init); @nr must be a valid debug_fault_info[] index.
 */
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int,
					    struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

/*
 * Dispatch a debug exception to its handler.  Returns 0 when handled,
 * non-zero when the handler declined it.
 *
 * NOTE(review): arm64_notify_die() is passed 0 rather than esr here,
 * unlike do_mem_abort() above -- presumably deliberate for debug events,
 * but worth confirming against the notifier consumers.
 */
asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return 1;

	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, 0);

	return 0;
}
./CrossVul/dataset_final_sorted/CWE-19/c/good_2422_1
crossvul-cpp_data_good_5253_0
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
 *
 *	Based upon Swansea University Computer Society NET3.039
 */

#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/if_bridge.h>
#include <linux/if_frad.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kmod.h>
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

#include <net/compat.h>
#include <net/wext.h>
#include <net/cls_cgroup.h>

#include <net/sock.h>
#include <linux/netfilter.h>

#include <linux/if_tun.h>
#include <linux/ipv6_route.h>
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>

#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll tunables exposed via sysctl. */
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif

/* Forward declarations for the file_operations methods defined below. */
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);

static int sock_close(struct inode *inode, struct file *file);
static unsigned int sock_poll(struct file *file,
			      struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg);
#endif
static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags);

/*
 *	Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
 *	in the operation structures but are done directly via the socketcall() multiplexor.
 */

static const struct file_operations socket_file_ops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.read_iter =	sock_read_iter,
	.write_iter =	sock_write_iter,
	.poll =		sock_poll,
	.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_sock_ioctl,
#endif
	.mmap =		sock_mmap,
	.release =	sock_close,
	.fasync =	sock_fasync,
	.sendpage =	sock_sendpage,
	.splice_write = generic_splice_sendpage,
	.splice_read =	sock_splice_read,
};

/*
 *	The protocol list. Each protocol is registered in here.
 */

static DEFINE_SPINLOCK(net_family_lock);
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;

/*
 *	Statistics counters of the socket lists
 */

static DEFINE_PER_CPU(int, sockets_in_use);

/*
 * Support routines.
 * Move socket addresses back and forth across the kernel/user
 * divide and look after the messy bits.
 */

/**
 *	move_addr_to_kernel	-	copy a socket address into kernel space
 *	@uaddr: Address in user space
 *	@kaddr: Address in kernel space
 *	@ulen: Length in user space
 *
 *	The address is copied into kernel space. If the provided address is
 *	too long an error code of -EINVAL is returned. If the copy gives
 *	invalid addresses -EFAULT is returned. On a success 0 is returned.
 */

int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr)
{
	/* Reject lengths outside [0, sizeof(struct sockaddr_storage)]. */
	if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
		return -EINVAL;
	if (ulen == 0)
		return 0;
	if (copy_from_user(kaddr, uaddr, ulen))
		return -EFAULT;
	return audit_sockaddr(ulen, kaddr);
}

/**
 *	move_addr_to_user	-	copy an address to user space
 *	@kaddr: kernel space address
 *	@klen: length of address in kernel
 *	@uaddr: user space address
 *	@ulen: pointer to user length field
 *
 *	The value pointed to by ulen on entry is the buffer length available.
 *	This is overwritten with the buffer space used. -EINVAL is returned
 *	if an overlong buffer is specified or a negative buffer size. -EFAULT
 *	is returned if either the buffer or the length field are not
 *	accessible.
 *	After copying the data up to the limit the user specifies, the true
 *	length of the data is written over the length limit the user
 *	specified. Zero is returned for a success.
 */

static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
			     void __user *uaddr, int __user *ulen)
{
	int err;
	int len;

	BUG_ON(klen > sizeof(struct sockaddr_storage));
	err = get_user(len, ulen);
	if (err)
		return err;
	/* Copy at most the kernel length; a negative user length is bogus. */
	if (len > klen)
		len = klen;
	if (len < 0)
		return -EINVAL;
	if (len) {
		if (audit_sockaddr(klen, kaddr))
			return -ENOMEM;
		if (copy_to_user(uaddr, kaddr, len))
			return -EFAULT;
	}
	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	return __put_user(klen, ulen);
}

/* Slab cache for socket inodes (struct socket_alloc). */
static struct kmem_cache *sock_inode_cachep __read_mostly;

/*
 * Allocate a socket inode together with its wait-queue structure; the
 * socket starts out unconnected with no ops/sk/file attached.
 */
static struct inode *sock_alloc_inode(struct super_block *sb)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq) {
		kmem_cache_free(sock_inode_cachep, ei);
		return NULL;
	}
	init_waitqueue_head(&wq->wait);
	wq->fasync_list = NULL;
	wq->flags = 0;
	RCU_INIT_POINTER(ei->socket.wq, wq);

	ei->socket.state = SS_UNCONNECTED;
	ei->socket.flags = 0;
	ei->socket.ops = NULL;
	ei->socket.sk = NULL;
	ei->socket.file = NULL;

	return &ei->vfs_inode;
}

/* Tear down a socket inode; the wait queue is freed after a grace period. */
static void sock_destroy_inode(struct inode *inode)
{
	struct socket_alloc *ei;
	struct socket_wq *wq;

	ei = container_of(inode, struct socket_alloc, vfs_inode);
	wq = rcu_dereference_protected(ei->socket.wq, 1);
	kfree_rcu(wq, rcu);
	kmem_cache_free(sock_inode_cachep, ei);
}

/* Slab constructor: initialise the embedded VFS inode once per object. */
static void init_once(void *foo)
{
	struct socket_alloc *ei = (struct socket_alloc *)foo;

	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	sock_inode_cachep = kmem_cache_create("sock_inode_cache",
					      sizeof(struct socket_alloc),
					      0,
					      (SLAB_HWCACHE_ALIGN |
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					      init_once);
	if (sock_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static const struct super_operations sockfs_ops = {
	.alloc_inode	= sock_alloc_inode,
	.destroy_inode	= sock_destroy_inode,
	.statfs		= simple_statfs,
};

/*
 * sockfs_dname() is called from d_path().
 */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations sockfs_dentry_operations = {
	.d_dname  = sockfs_dname,
};

/* Mount callback for the internal, unnamed sockfs filesystem. */
static struct dentry *sockfs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
		&sockfs_dentry_operations, SOCKFS_MAGIC);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.mount =	sockfs_mount,
	.kill_sb =	kill_anon_super,
};

/*
 *	Obtains the first available file descriptor and sets it up for use.
 *
 *	These functions create file structures and maps them to fd space
 *	of the current process. On success it returns file descriptor
 *	and file struct implicitly stored in sock->file.
 *	Note that another thread may close file descriptor before we return
 *	from this function. We use the fact that now we do not refer
 *	to socket after mapping. If one day we will need it, this
 *	function will increment ref. count on file by 1.
 *
 *	In any case returned fd MAY BE not valid!
 *	This race condition is unavoidable
 *	with shared fd spaces, we cannot solve it inside kernel,
 *	but we take care of internal coherence yet.
 */

struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;

	/* Name the dentry after the caller's choice, else the protocol. */
	if (dname) {
		name.name = dname;
		name.len = strlen(name.name);
	} else if (sock->sk) {
		name.name = sock->sk->sk_prot_creator->name;
		name.len = strlen(name.name);
	}
	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry))
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (IS_ERR(file)) {
		/* drop dentry, keep inode */
		ihold(d_inode(path.dentry));
		path_put(&path);
		return file;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->private_data = sock;
	return file;
}
EXPORT_SYMBOL(sock_alloc_file);

/* Allocate a descriptor, back it with a socket file, and install it. */
static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {
		fd_install(fd, newfile);
		return fd;
	}

	put_unused_fd(fd);
	return PTR_ERR(newfile);
}

/*
 * Return the socket behind @file, or NULL (with *err = -ENOTSOCK) when
 * the file is not a socket.  Identification is by f_op pointer.
 */
struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}
EXPORT_SYMBOL(sock_from_file);

/**
 *	sockfd_lookup - Go from a file number to its socket slot
 *	@fd: file handle
 *	@err: pointer to an error code return
 *
 *	The file handle passed in is locked and the socket it is bound
 *	too is returned. If an error occurs the err pointer is overwritten
 *	with a negative errno code and NULL is returned. The function checks
 *	for both invalid handles and passing a handle which is not a socket.
 *
 *	On a success the socket object pointer is returned.
 */

struct socket *sockfd_lookup(int fd, int *err)
{
	struct file *file;
	struct socket *sock;

	file = fget(fd);
	if (!file) {
		*err = -EBADF;
		return NULL;
	}

	sock = sock_from_file(file, err);
	if (!sock)
		fput(file);	/* not a socket: drop the ref we just took */
	return sock;
}
EXPORT_SYMBOL(sockfd_lookup);

/*
 * Lightweight fd -> socket lookup using fdget(); *fput_needed tells the
 * caller whether a matching fput is required (see fdput()).
 */
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
	struct fd f = fdget(fd);
	struct socket *sock;

	*err = -EBADF;
	if (f.file) {
		sock = sock_from_file(f.file, err);
		if (likely(sock)) {
			*fput_needed = f.flags;
			return sock;
		}
		fdput(f);
	}
	return NULL;
}

#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)

/*
 * Expose the protocol name (the sockfs dentry name) as the
 * system.sockprotoname extended attribute.
 */
static ssize_t sockfs_getxattr(struct dentry *dentry,
			       const char *name, void *value, size_t size)
{
	const char *proto_name;
	size_t proto_size;
	int error;

	error = -ENODATA;
	if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
		proto_name = dentry->d_name.name;
		proto_size = strlen(proto_name);

		if (value) {
			error = -ERANGE;
			if (proto_size + 1 > size)
				goto out;

			strncpy(value, proto_name, proto_size + 1);
		}
		/* With a NULL buffer, just report the required size. */
		error = proto_size + 1;
	}

out:
	return error;
}

/*
 * List the security xattrs plus the fixed sockprotoname attribute.
 * Returns the total space used, or -ERANGE if @size is too small.
 */
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
				size_t size)
{
	ssize_t len;
	ssize_t used = 0;

	len = security_inode_listsecurity(d_inode(dentry), buffer, size);
	if (len < 0)
		return len;
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		buffer += len;
	}
	len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
	used += len;
	if (buffer) {
		if (size < used)
			return -ERANGE;
		memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
		buffer += len;
	}
	return used;
}

static const struct inode_operations sockfs_inode_ops = {
	.getxattr = sockfs_getxattr,
	.listxattr = sockfs_listxattr,
};

/**
 *	sock_alloc	-	allocate a socket
 *
 *	Allocate a new inode and socket object. The two are bound together
 *	and initialised. The socket is then returned. If we are out of inodes
 *	NULL is returned.
 */

struct socket *sock_alloc(void)
{
	struct inode *inode;
	struct socket *sock;

	inode = new_inode_pseudo(sock_mnt->mnt_sb);
	if (!inode)
		return NULL;

	sock = SOCKET_I(inode);

	kmemcheck_annotate_bitfield(sock, type);
	inode->i_ino = get_next_ino();
	inode->i_mode = S_IFSOCK | S_IRWXUGO;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_op = &sockfs_inode_ops;

	this_cpu_add(sockets_in_use, 1);
	return sock;
}
EXPORT_SYMBOL(sock_alloc);

/**
 *	sock_release	-	close a socket
 *	@sock: socket to close
 *
 *	The socket is released from the protocol stack if it has a release
 *	callback, and the inode is then released if the socket is bound to
 *	an inode not a file.
 */

void sock_release(struct socket *sock)
{
	if (sock->ops) {
		struct module *owner = sock->ops->owner;

		sock->ops->release(sock);
		sock->ops = NULL;
		module_put(owner);
	}

	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
		pr_err("%s: fasync list not empty!\n", __func__);

	this_cpu_sub(sockets_in_use, 1);
	if (!sock->file) {
		iput(SOCK_INODE(sock));
		return;
	}
	sock->file = NULL;
}
EXPORT_SYMBOL(sock_release);

/* Translate the socket's SOF_TIMESTAMPING_* flags into skb tx_flags bits. */
void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
{
	u8 flags = *tx_flags;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
		flags |= SKBTX_HW_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
		flags |= SKBTX_SW_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)
		flags |= SKBTX_SCHED_TSTAMP;

	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
		flags |= SKBTX_ACK_TSTAMP;

	*tx_flags = flags;
}
EXPORT_SYMBOL(__sock_tx_timestamp);

/* Send without the LSM security hook (caller has already run it). */
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
	int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
	BUG_ON(ret == -EIOCBQUEUED);
	return ret;
}

/* Security-checked sendmsg entry point. */
int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
	int err = security_socket_sendmsg(sock, msg,
					  msg_data_left(msg));

	return err ?: sock_sendmsg_nosec(sock, msg);
}
EXPORT_SYMBOL(sock_sendmsg);

/* Kernel-internal sendmsg over a kvec scatter/gather array. */
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size)
{
	iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
	return sock_sendmsg(sock, msg);
}
EXPORT_SYMBOL(kernel_sendmsg);

/*
 * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
 */
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
	struct scm_timestamping tss;
	int empty = 1;
	struct skb_shared_hwtstamps *shhwtstamps =
		skb_hwtstamps(skb);

	/* Race occurred between timestamp enabling and packet
	   receiving.  Fill in the current time for now. */
	if (need_software_tstamp && skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	if (need_software_tstamp) {
		if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
			struct timeval tv;
			skb_get_timestamp(skb, &tv);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
				 sizeof(tv), &tv);
		} else {
			struct timespec ts;
			skb_get_timestampns(skb, &ts);
			put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
				 sizeof(ts), &ts);
		}
	}

	memset(&tss, 0, sizeof(tss));
	if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec_cond(skb->tstamp, tss.ts + 0))
		empty = 0;
	if (shhwtstamps &&
	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
		empty = 0;
	if (!empty)
		put_cmsg(msg, SOL_SOCKET,
			 SCM_TIMESTAMPING, sizeof(tss), &tss);
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);

/* Report the wifi TX ack status as a SCM_WIFI_STATUS cmsg, if enabled. */
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	int ack;

	if (!sock_flag(sk, SOCK_WIFI_STATUS))
		return;
	if (!skb->wifi_acked_valid)
		return;

	ack = skb->wifi_acked;

	put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
}
EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);

/* Report the receive-queue drop count as a SO_RXQ_OVFL cmsg, if enabled. */
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			sizeof(__u32), &SOCK_SKB_CB(skb)->dropcount);
}

void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb)
{
	sock_recv_timestamp(msg, sk, skb);
	sock_recv_drops(msg, sk, skb);
}
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);

/* Receive without the LSM security hook (caller has already run it). */
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
				     size_t size, int flags)
{
	return sock->ops->recvmsg(sock, msg, size, flags);
}

/* Security-checked recvmsg entry point. */
int sock_recvmsg(struct socket *sock, struct msghdr *msg,
		 size_t size, int flags)
{
	int err = security_socket_recvmsg(sock, msg, size, flags);

	return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
}
EXPORT_SYMBOL(sock_recvmsg);

/**
 * kernel_recvmsg - Receive a message from a socket (kernel space)
 * @sock:       The socket to receive the message from
 * @msg:        Received message
 * @vec:        Input s/g array for message data
 * @num:        Size of input s/g array
 * @size:       Number of bytes to read
 * @flags:      Message flags (MSG_DONTWAIT, etc...)
 *
 * On return the msg structure contains the scatter/gather array passed in the
 * vec argument. The array is modified so that it consists of the unfilled
 * portion of the original array.
 *
 * The returned value is the total number of bytes received, or an error.
 */
int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
		   struct kvec *vec, size_t num, size_t size, int flags)
{
	mm_segment_t oldfs = get_fs();
	int result;

	iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
	/* Temporarily lift the user/kernel address check for kernel buffers. */
	set_fs(KERNEL_DS);
	result = sock_recvmsg(sock, msg, size, flags);
	set_fs(oldfs);
	return result;
}
EXPORT_SYMBOL(kernel_recvmsg);

/* ->sendpage for socket files: forwards to the protocol's sendpage. */
static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more)
{
	struct socket *sock;
	int flags;

	sock = file->private_data;

	flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
	/* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
	flags |= more;

	return kernel_sendpage(sock, page, offset, size, flags);
}

/* ->splice_read for socket files, if the protocol supports it. */
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct socket *sock = file->private_data;

	if (unlikely(!sock->ops->splice_read))
		return -EINVAL;

	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
}

/* ->read_iter: translate a read on the socket file into a recvmsg. */
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct socket *sock = file->private_data;
	struct msghdr msg = {.msg_iter = *to,
			     .msg_iocb = iocb};
	ssize_t res;

	if (file->f_flags & O_NONBLOCK)
		msg.msg_flags = MSG_DONTWAIT;

	if (iocb->ki_pos != 0)
		return -ESPIPE;

	if (!iov_iter_count(to))	/* Match SYS5 behaviour */
		return 0;

	res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
	*to = msg.msg_iter;
	return res;
}

/* ->write_iter: translate a write on the socket file into a sendmsg. */
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct socket *sock = file->private_data;
	struct msghdr msg = {.msg_iter = *from,
			     .msg_iocb = iocb};
	ssize_t res;

	if (iocb->ki_pos != 0)
		return -ESPIPE;

	if (file->f_flags & O_NONBLOCK)
		msg.msg_flags = MSG_DONTWAIT;

	if (sock->type == SOCK_SEQPACKET)
		msg.msg_flags |= MSG_EOR;

	res = sock_sendmsg(sock, &msg);
	*from = msg.msg_iter;
	return res;
}

/*
 * Atomic setting of ioctl hooks to avoid race
 * with module unload.
 */

static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);

/* Register/unregister the bridge module's ioctl handler. */
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
	mutex_lock(&br_ioctl_mutex);
	br_ioctl_hook = hook;
	mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);

static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook) (struct net *, void __user *arg);

/* Register/unregister the 8021q module's ioctl handler. */
void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
{
	mutex_lock(&vlan_ioctl_mutex);
	vlan_ioctl_hook = hook;
	mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);

static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook) (unsigned int, void __user *);

/* Register/unregister the dlci module's ioctl handler. */
void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
{
	mutex_lock(&dlci_ioctl_mutex);
	dlci_ioctl_hook = hook;
	mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);

/*
 * Forward an ioctl to the protocol; if the protocol does not recognise
 * it (-ENOIOCTLCMD), try the generic device ioctl path.
 */
static long sock_do_ioctl(struct net *net, struct socket *sock,
				 unsigned int cmd, unsigned long arg)
{
	int err;
	void __user *argp = (void __user *)arg;

	err = sock->ops->ioctl(sock, cmd, arg);

	/*
	 * If this ioctl is unknown try to hand it down
	 * to the NIC driver.
	 */
	if (err == -ENOIOCTLCMD)
		err = dev_ioctl(net, cmd, argp);

	return err;
}

/*
 *	With an ioctl, arg may well be a user mode pointer, but we don't know
 *	what to do with it - that's up to the protocol still.
 */

static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct socket *sock;
	struct sock *sk;
	void __user *argp = (void __user *)arg;
	int pid, err;
	struct net *net;

	sock = file->private_data;
	sk = sock->sk;
	net = sock_net(sk);
	/* Device-private and wireless-extension ranges go to dev_ioctl(). */
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
		err = dev_ioctl(net, cmd, argp);
	} else
#ifdef CONFIG_WEXT_CORE
	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
		err = dev_ioctl(net, cmd, argp);
	} else
#endif
		switch (cmd) {
		case FIOSETOWN:
		case SIOCSPGRP:
			err = -EFAULT;
			if (get_user(pid, (int __user *)argp))
				break;
			f_setown(sock->file, pid, 1);
			err = 0;
			break;
		case FIOGETOWN:
		case SIOCGPGRP:
			err = put_user(f_getown(sock->file),
				       (int __user *)argp);
			break;
		case SIOCGIFBR:
		case SIOCSIFBR:
		case SIOCBRADDBR:
		case SIOCBRDELBR:
			/* Hand off to the bridge module, loading it on demand. */
			err = -ENOPKG;
			if (!br_ioctl_hook)
				request_module("bridge");

			mutex_lock(&br_ioctl_mutex);
			if (br_ioctl_hook)
				err = br_ioctl_hook(net, cmd, argp);
			mutex_unlock(&br_ioctl_mutex);
			break;
		case SIOCGIFVLAN:
		case SIOCSIFVLAN:
			err = -ENOPKG;
			if (!vlan_ioctl_hook)
				request_module("8021q");

			mutex_lock(&vlan_ioctl_mutex);
			if (vlan_ioctl_hook)
				err = vlan_ioctl_hook(net, argp);
			mutex_unlock(&vlan_ioctl_mutex);
			break;
		case SIOCADDDLCI:
		case SIOCDELDLCI:
			err = -ENOPKG;
			if (!dlci_ioctl_hook)
				request_module("dlci");

			mutex_lock(&dlci_ioctl_mutex);
			if (dlci_ioctl_hook)
				err = dlci_ioctl_hook(cmd, argp);
			mutex_unlock(&dlci_ioctl_mutex);
			break;
		default:
			err = sock_do_ioctl(net, sock, cmd, arg);
			break;
		}
	return err;
}

/*
 * Allocate a bare socket of the given type with only the LSM hooks run;
 * the caller wires up the protocol.  On failure *res is set to NULL.
 */
int sock_create_lite(int family, int type, int protocol, struct socket **res)
{
	int err;
	struct socket *sock = NULL;

	err = security_socket_create(family, type, protocol, 1);
	if (err)
		goto out;

	sock = sock_alloc();
	if (!sock) {
		err = -ENOMEM;
		goto out;
	}

	sock->type = type;
	err = security_socket_post_create(sock, family, type, protocol, 1);
	if (err)
		goto out_release;

out:
	*res = sock;
	return err;
out_release:
	sock_release(sock);
	sock = NULL;
	goto out;
}
EXPORT_SYMBOL(sock_create_lite);

/* No kernel
lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { unsigned int busy_flag = 0; struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; if (sk_can_busy_loop(sock->sk)) { /* this socket can poll_ll so tell the system call */ busy_flag = POLL_BUSY_LOOP; /* once, only if requested by syscall */ if (wait && (wait->_key & POLL_BUSY_LOOP)) sk_busy_loop(sock->sk, 1); } return busy_flag | sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) { struct socket *sock = file->private_data; return sock->ops->mmap(file, sock, vma); } static int sock_close(struct inode *inode, struct file *filp) { sock_release(SOCKET_I(inode)); return 0; } /* * Update the socket async list * * Fasync_list locking strategy. * * 1. fasync_list is modified only under process context socket lock * i.e. under semaphore. * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) * or under socket lock */ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; struct socket_wq *wq; if (sk == NULL) return -EINVAL; lock_sock(sk); wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) sock_reset_flag(sk, SOCK_FASYNC); else sock_set_flag(sk, SOCK_FASYNC); release_sock(sk); return 0; } /* This function may be called only under rcu_lock */ int sock_wake_async(struct socket_wq *wq, int how, int band) { if (!wq || !wq->fasync_list) return -1; switch (how) { case SOCK_WAKE_WAITD: if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags)) break; /* fall through */ case SOCK_WAKE_IO: call_kill: kill_fasync(&wq->fasync_list, SIGIO, band); break; case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, SIGURG, band); } return 0; 
}
EXPORT_SYMBOL(sock_wake_async);

/*
 *	Create a socket of the given family/type/protocol in namespace @net
 *	and return it via *res.  @kern distinguishes kernel-internal sockets
 *	from user-requested ones (affects LSM checks and sk_kern_sock).
 */
int __sock_create(struct net *net, int family, int type, int protocol,
			 struct socket **res, int kern)
{
	int err;
	struct socket *sock;
	const struct net_proto_family *pf;

	/*
	 *      Check protocol is in range
	 */
	if (family < 0 || family >= NPROTO)
		return -EAFNOSUPPORT;
	if (type < 0 || type >= SOCK_MAX)
		return -EINVAL;

	/* Compatibility.

	   This uglymoron is moved from INET layer to here to avoid
	   deadlock in module load.
	 */
	if (family == PF_INET && type == SOCK_PACKET) {
		pr_info_once("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
			     current->comm);
		family = PF_PACKET;
	}

	err = security_socket_create(family, type, protocol, kern);
	if (err)
		return err;

	/*
	 *	Allocate the socket and allow the family to set things up. if
	 *	the protocol is 0, the family is instructed to select an appropriate
	 *	default.
	 */
	sock = sock_alloc();
	if (!sock) {
		net_warn_ratelimited("socket: no more sockets\n");
		return -ENFILE;	/* Not exactly a match, but it's the closest
				   posix thing */
	}

	sock->type = type;

#ifdef CONFIG_MODULES
	/* Attempt to load a protocol module if the find failed.
	 *
	 * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
	 * requested real, full-featured networking support upon configuration.
	 * Otherwise module support will break!
	 */
	if (rcu_access_pointer(net_families[family]) == NULL)
		request_module("net-pf-%d", family);
#endif

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	err = -EAFNOSUPPORT;
	if (!pf)
		goto out_release;

	/*
	 * We will call the ->create function, that possibly is in a loadable
	 * module, so we have to bump that loadable module refcnt first.
	 */
	if (!try_module_get(pf->owner))
		goto out_release;

	/* Now protected by module ref count */
	rcu_read_unlock();

	err = pf->create(net, sock, protocol, kern);
	if (err < 0)
		goto out_module_put;

	/*
	 * Now to bump the refcnt of the [loadable] module that owns this
	 * socket at sock_release time we decrement its refcnt.
	 */
	if (!try_module_get(sock->ops->owner))
		goto out_module_busy;

	/*
	 * Now that we're done with the ->create function, the [loadable]
	 * module can have its refcnt decremented
	 */
	module_put(pf->owner);
	err = security_socket_post_create(sock, family, type, protocol, kern);
	if (err)
		goto out_sock_release;
	*res = sock;

	return 0;

out_module_busy:
	err = -EAFNOSUPPORT;
out_module_put:
	/* Clear ops so sock_release() won't drop a refcnt we never took. */
	sock->ops = NULL;
	module_put(pf->owner);
out_sock_release:
	sock_release(sock);
	return err;

out_release:
	rcu_read_unlock();
	goto out_sock_release;
}
EXPORT_SYMBOL(__sock_create);

/* Create a user socket in the caller's network namespace. */
int sock_create(int family, int type, int protocol, struct socket **res)
{
	return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
EXPORT_SYMBOL(sock_create);

/* Create a kernel-internal socket (kern = 1) in the given namespace. */
int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
{
	return __sock_create(net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);

SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
	int retval;
	struct socket *sock;
	int flags;

	/* Check the SOCK_* constants for consistency.  */
	BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);

	/* The type argument multiplexes SOCK_CLOEXEC/SOCK_NONBLOCK flags. */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	retval = sock_create(family, type, protocol, &sock);
	if (retval < 0)
		goto out;

	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
	if (retval < 0)
		goto out_release;

out:
	/* It may be already another descriptor 8) Not kernel problem. */
	return retval;

out_release:
	sock_release(sock);
	return retval;
}

/*
 *	Create a pair of connected sockets.
 */

SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
		int __user *, usockvec)
{
	struct socket *sock1, *sock2;
	int fd1, fd2, err;
	struct file *newfile1, *newfile2;
	int flags;

	/* SOCK_CLOEXEC/SOCK_NONBLOCK are multiplexed into type, as in socket(2). */
	flags = type & ~SOCK_TYPE_MASK;
	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	type &= SOCK_TYPE_MASK;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	/*
	 * Obtain the first socket and check if the underlying protocol
	 * supports the socketpair call.
	 */

	err = sock_create(family, type, protocol, &sock1);
	if (err < 0)
		goto out;

	err = sock_create(family, type, protocol, &sock2);
	if (err < 0)
		goto out_release_1;

	err = sock1->ops->socketpair(sock1, sock2);
	if (err < 0)
		goto out_release_both;

	fd1 = get_unused_fd_flags(flags);
	if (unlikely(fd1 < 0)) {
		err = fd1;
		goto out_release_both;
	}

	fd2 = get_unused_fd_flags(flags);
	if (unlikely(fd2 < 0)) {
		err = fd2;
		goto out_put_unused_1;
	}

	/* After sock_alloc_file() succeeds, the file owns the socket. */
	newfile1 = sock_alloc_file(sock1, flags, NULL);
	if (IS_ERR(newfile1)) {
		err = PTR_ERR(newfile1);
		goto out_put_unused_both;
	}

	newfile2 = sock_alloc_file(sock2, flags, NULL);
	if (IS_ERR(newfile2)) {
		err = PTR_ERR(newfile2);
		goto out_fput_1;
	}

	/* Copy both fds out before publishing either via fd_install(). */
	err = put_user(fd1, &usockvec[0]);
	if (err)
		goto out_fput_both;

	err = put_user(fd2, &usockvec[1]);
	if (err)
		goto out_fput_both;

	audit_fd_pair(fd1, fd2);

	fd_install(fd1, newfile1);
	fd_install(fd2, newfile2);
	/* fd1 and fd2 may be already another descriptors.
	 * Not kernel problem.
	 */

	return 0;

out_fput_both:
	fput(newfile2);
	fput(newfile1);
	put_unused_fd(fd2);
	put_unused_fd(fd1);
	goto out;

out_fput_1:
	/* newfile1 owns sock1; sock2 must still be released explicitly. */
	fput(newfile1);
	put_unused_fd(fd2);
	put_unused_fd(fd1);
	sock_release(sock2);
	goto out;

out_put_unused_both:
	put_unused_fd(fd2);
out_put_unused_1:
	put_unused_fd(fd1);
out_release_both:
	sock_release(sock2);
out_release_1:
	sock_release(sock1);
out:
	return err;
}

/*
 *	Bind a name to a socket. Nothing much to do here since it's
 *	the protocol's responsibility to handle the local address.
 *
 *	We move the socket address to kernel space before we call
 *	the protocol layer (having also checked the address is ok).
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, &address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen. Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		/* Silently clamp the backlog to the per-netns somaxconn sysctl. */
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned int)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way thats
 *	clean when we restructure accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}
	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
	if (IS_ERR(newfile)) {
		err = PTR_ERR(newfile);
		put_unused_fd(newfd);
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		/* getname(..., 2) fetches the peer's address; len is an out param. */
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user(&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept() unlike another OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	/* fput() drops the file, which in turn releases newsock. */
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

/* Classic accept(2): accept4 with no flags. */
SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address.  The address
 *	is in user space so we verify it is OK and move it to kernel space.
 *
 *	For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
 *	break bindings
 *
 *	NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
 *	other SEQPACKET protocols that take time to connect() as it doesn't
 *	include the -EINPROGRESS status for such sockets.
 */

SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
		int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = move_addr_to_kernel(uservaddr, addrlen, &address);
	if (err < 0)
		goto out_put;

	err =
	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
	if (err)
		goto out_put;

	/* f_flags conveys O_NONBLOCK so protocols can return -EINPROGRESS. */
	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
				 sock->file->f_flags);
out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the local address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	/* getname(..., 0) fetches the local address; len is an out param. */
	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user(&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Get the remote address ('name') of a socket object. Move the obtained
 *	name to user space.
 */

SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		/* getname(..., 1) fetches the peer address; len is an out param. */
		err =
		    sock->ops->getname(sock, (struct sockaddr *)&address, &len,
				       1);
		if (!err)
			err = move_addr_to_user(&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Send a datagram to a given address. We move the address into kernel
 *	space and check the user space data area is readable before invoking
 *	the protocol.
 */

SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	/* Validate the user buffer and build a single-segment iov_iter. */
	err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
	if (unlikely(err))
		return err;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, &address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Send a datagram down a socket.
 */

SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned int, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}

/*
 *	Receive a frame from the socket and optionally record the address of the
 *	sender. We verify the buffers are writable and if needed move the
 *	sender address from kernel to user space.
 */

SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned int, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	/* Validate the user buffer and build a single-segment iov_iter. */
	err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
	if (unlikely(err))
		return err;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	/* Save some cycles and don't copy the address if not needed */
	msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
	/* We assume all kernel code knows the size of sockaddr_storage */
	msg.msg_namelen = 0;
	msg.msg_iocb = NULL;
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);

	if (err >= 0 && addr != NULL) {
		/* Preserve the byte count in err; report copy errors via err2. */
		err2 = move_addr_to_user(&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}

/*
 *	Receive a datagram from a socket.
 */

SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size,
		unsigned int, flags)
{
	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}

/*
 *	Set a socket option. Because we don't know the option lengths we have
 *	to pass the user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	if (optlen < 0)
		return -EINVAL;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_setsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are handled generically, not per-protocol. */
		if (level == SOL_SOCKET)
			err =
			    sock_setsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->setsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Get a socket option.
Because we don't know the option lengths we have
 *	to pass a user mode parameter for the protocols to sort out.
 */

SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
		char __user *, optval, int __user *, optlen)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getsockopt(sock, level, optname);
		if (err)
			goto out_put;

		/* SOL_SOCKET options are handled generically, not per-protocol. */
		if (level == SOL_SOCKET)
			err =
			    sock_getsockopt(sock, level, optname, optval,
					    optlen);
		else
			err =
			    sock->ops->getsockopt(sock, level, optname, optval,
						  optlen);
out_put:
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Shutdown a socket.
 */

SYSCALL_DEFINE2(shutdown, int, fd, int, how)
{
	int err, fput_needed;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_shutdown(sock, how);
		if (!err)
			err = sock->ops->shutdown(sock, how);
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/* A couple of helpful macros for getting the address of the 32/64 bit
 * fields which are the same type (int / unsigned) on our platforms.
 * NOTE: these expand a local variable `flags` at the use site.
 */
#define COMPAT_MSG(msg, member)	((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
#define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)

/* Per-call cache of the last successful sendmmsg() destination. */
struct used_address {
	struct sockaddr_storage name;
	unsigned int name_len;
};

/*
 * Copy a user_msghdr into kernel form.  If @save_addr is non-NULL the
 * caller wants the raw user address pointer back (recvmsg path) and no
 * address copy-in is done; otherwise (sendmsg path) the address is moved
 * into kmsg->msg_name.  On success the iovec array is imported into
 * kmsg->msg_iter (possibly allocating *iov, which the caller must free).
 */
static int copy_msghdr_from_user(struct msghdr *kmsg,
				 struct user_msghdr __user *umsg,
				 struct sockaddr __user **save_addr,
				 struct iovec **iov)
{
	struct sockaddr __user *uaddr;
	struct iovec __user *uiov;
	size_t nr_segs;
	ssize_t err;

	if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) ||
	    __get_user(uaddr, &umsg->msg_name) ||
	    __get_user(kmsg->msg_namelen, &umsg->msg_namelen) ||
	    __get_user(uiov, &umsg->msg_iov) ||
	    __get_user(nr_segs, &umsg->msg_iovlen) ||
	    __get_user(kmsg->msg_control, &umsg->msg_control) ||
	    __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
		return -EFAULT;

	if (!uaddr)
		kmsg->msg_namelen = 0;

	if (kmsg->msg_namelen < 0)
		return -EINVAL;

	/* Clamp rather than reject oversized name lengths. */
	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
		kmsg->msg_namelen = sizeof(struct sockaddr_storage);

	if (save_addr)
		*save_addr = uaddr;

	if (uaddr && kmsg->msg_namelen) {
		if (!save_addr) {
			err = move_addr_to_kernel(uaddr, kmsg->msg_namelen,
						  kmsg->msg_name);
			if (err < 0)
				return err;
		}
	} else {
		kmsg->msg_name = NULL;
		kmsg->msg_namelen = 0;
	}

	if (nr_segs > UIO_MAXIOV)
		return -EMSGSIZE;

	kmsg->msg_iocb = NULL;

	/* save_addr != NULL means this is the receive path. */
	return import_iovec(save_addr ?
READ : WRITE, uiov, nr_segs,
			    UIO_FASTIOV, iov, &kmsg->msg_iter);
}

/*
 * Common sendmsg worker.  @used_address (sendmmsg only) lets repeated
 * sends to the same destination skip the LSM check; @allowed_msghdr_flags
 * whitelists flags that may be taken from the user msghdr.
 */
static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
			  struct msghdr *msg_sys, unsigned int flags,
			  struct used_address *used_address,
			  unsigned int allowed_msghdr_flags)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct sockaddr_storage address;
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	unsigned char ctl[sizeof(struct cmsghdr) + 20]
	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
	/* 20 is size of ipv6_pktinfo */
	unsigned char *ctl_buf = ctl;
	int ctl_len;
	ssize_t err;

	msg_sys->msg_name = &address;

	if (MSG_CMSG_COMPAT & flags)
		err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov);
	else
		err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
	if (err < 0)
		return err;

	err = -ENOBUFS;

	if (msg_sys->msg_controllen > INT_MAX)
		goto out_freeiov;
	flags |= (msg_sys->msg_flags & allowed_msghdr_flags);
	ctl_len = msg_sys->msg_controllen;
	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
		err =
		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
						     sizeof(ctl));
		if (err)
			goto out_freeiov;
		ctl_buf = msg_sys->msg_control;
		ctl_len = msg_sys->msg_controllen;
	} else if (ctl_len) {
		/* Small control payloads use the on-stack buffer. */
		if (ctl_len > sizeof(ctl)) {
			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
			if (ctl_buf == NULL)
				goto out_freeiov;
		}
		err = -EFAULT;
		/*
		 * Careful! Before this, msg_sys->msg_control contains a user pointer.
		 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
		 * checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
				   (void __user __force *)msg_sys->msg_control,
				   ctl_len))
			goto out_freectl;
		msg_sys->msg_control = ctl_buf;
	}
	msg_sys->msg_flags = flags;

	if (sock->file->f_flags & O_NONBLOCK)
		msg_sys->msg_flags |= MSG_DONTWAIT;
	/*
	 * If this is sendmmsg() and current destination address is same as
	 * previously succeeded address, omit asking LSM's decision.
	 * used_address->name_len is initialized to UINT_MAX so that the first
	 * destination address never matches.
	 */
	if (used_address && msg_sys->msg_name &&
	    used_address->name_len == msg_sys->msg_namelen &&
	    !memcmp(&used_address->name, msg_sys->msg_name,
		    used_address->name_len)) {
		err = sock_sendmsg_nosec(sock, msg_sys);
		goto out_freectl;
	}
	err = sock_sendmsg(sock, msg_sys);
	/*
	 * If this is sendmmsg() and sending to current destination address was
	 * successful, remember it.
	 */
	if (used_address && err >= 0) {
		used_address->name_len = msg_sys->msg_namelen;
		if (msg_sys->msg_name)
			memcpy(&used_address->name, msg_sys->msg_name,
			       used_address->name_len);
	}

out_freectl:
	if (ctl_buf != ctl)
		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
out_freeiov:
	kfree(iov);
	return err;
}

/*
 *	BSD sendmsg interface
 */

long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);

	fput_light(sock->file, fput_needed);
out:
	return err;
}

SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags)
{
	/* MSG_CMSG_COMPAT is kernel-internal; reject it from native callers. */
	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;
	return __sys_sendmsg(fd, msg, flags);
}

/*
 *	Linux sendmmsg interface
 */

int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
		   unsigned int flags)
{
	int fput_needed, err, datagrams;
	struct socket *sock;
	struct mmsghdr __user *entry;
	struct compat_mmsghdr __user *compat_entry;
	struct msghdr msg_sys;
	struct used_address used_address;
	unsigned int oflags = flags;

	if (vlen > UIO_MAXIOV)
		vlen = UIO_MAXIOV;

	datagrams = 0;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	/* UINT_MAX guarantees the first destination never matches the cache. */
	used_address.name_len = UINT_MAX;
	entry = mmsg;
	compat_entry = (struct compat_mmsghdr __user *)mmsg;
	err = 0;
	flags |= MSG_BATCH;

	while (datagrams < vlen) {
		/* Drop MSG_BATCH for the final datagram of the batch. */
		if (datagrams == vlen - 1)
			flags = oflags;

		if (MSG_CMSG_COMPAT
& flags) {
			err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry,
					     &msg_sys, flags, &used_address, MSG_EOR);
			if (err < 0)
				break;
			/* Record the per-message byte count in the user array. */
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			err = ___sys_sendmsg(sock,
					     (struct user_msghdr __user *)entry,
					     &msg_sys, flags, &used_address, MSG_EOR);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;
		cond_resched();
	}

	fput_light(sock->file, fput_needed);

	/* We only return an error if no datagrams were able to be sent */
	if (datagrams != 0)
		return datagrams;

	return err;
}

SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
		unsigned int, vlen, unsigned int, flags)
{
	/* MSG_CMSG_COMPAT is kernel-internal; reject it from native callers. */
	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;
	return __sys_sendmmsg(fd, mmsg, vlen, flags);
}

/*
 * Common recvmsg worker.  @nosec != 0 (recvmmsg after the first datagram)
 * skips the LSM security check.
 */
static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
			  struct msghdr *msg_sys, unsigned int flags, int nosec)
{
	struct compat_msghdr __user *msg_compat =
	    (struct compat_msghdr __user *)msg;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	unsigned long cmsg_ptr;
	int total_len, len;
	ssize_t err;

	/* kernel mode address */
	struct sockaddr_storage addr;

	/* user mode address pointers */
	struct sockaddr __user *uaddr;
	int __user *uaddr_len = COMPAT_NAMELEN(msg);

	msg_sys->msg_name = &addr;

	if (MSG_CMSG_COMPAT & flags)
		err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov);
	else
		err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
	if (err < 0)
		return err;

	total_len = iov_iter_count(&msg_sys->msg_iter);

	/* Remember the original control pointer to compute consumed length. */
	cmsg_ptr = (unsigned long)msg_sys->msg_control;
	msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);

	/* We assume all kernel code knows the size of sockaddr_storage */
	msg_sys->msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
							  total_len, flags);
	if (err < 0)
		goto out_freeiov;
	len = err;

	if (uaddr != NULL) {
		err = move_addr_to_user(&addr,
					msg_sys->msg_namelen, uaddr,
					uaddr_len);
		if (err < 0)
			goto out_freeiov;
	}
	err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
			 COMPAT_FLAGS(msg));
	if (err)
		goto out_freeiov;
	if (MSG_CMSG_COMPAT & flags)
		err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
				 &msg_compat->msg_controllen);
	else
		err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
				 &msg->msg_controllen);
	if (err)
		goto out_freeiov;
	err = len;

out_freeiov:
	kfree(iov);
	return err;
}

/*
 *	BSD recvmsg interface
 */

long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);

	fput_light(sock->file, fput_needed);
out:
	return err;
}

SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
		unsigned int, flags)
{
	/* MSG_CMSG_COMPAT is kernel-internal; reject it from native callers. */
	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;
	return __sys_recvmsg(fd, msg, flags);
}

/*
 *     Linux recvmmsg interface
 */

int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
		   unsigned int flags, struct timespec *timeout)
{
	int fput_needed, err, datagrams;
	struct socket *sock;
	struct mmsghdr __user *entry;
	struct compat_mmsghdr __user *compat_entry;
	struct msghdr msg_sys;
	struct timespec end_time;

	if (timeout &&
	    poll_select_set_timeout(&end_time, timeout->tv_sec,
				    timeout->tv_nsec))
		return -EINVAL;

	datagrams = 0;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	err = sock_error(sock->sk);
	if (err)
		goto out_put;

	entry = mmsg;
	compat_entry = (struct compat_mmsghdr __user *)mmsg;

	while (datagrams < vlen) {
		/*
		 * No need to ask LSM for more than the first datagram.
		 */
		if (MSG_CMSG_COMPAT & flags) {
			err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry,
					     &msg_sys, flags & ~MSG_WAITFORONE,
					     datagrams);
			if (err < 0)
				break;
			err = __put_user(err, &compat_entry->msg_len);
			++compat_entry;
		} else {
			/* nosec = datagrams: skip LSM after the first datagram. */
			err = ___sys_recvmsg(sock,
					     (struct user_msghdr __user *)entry,
					     &msg_sys, flags & ~MSG_WAITFORONE,
					     datagrams);
			if (err < 0)
				break;
			err = put_user(err, &entry->msg_len);
			++entry;
		}

		if (err)
			break;
		++datagrams;

		/* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
		if (flags & MSG_WAITFORONE)
			flags |= MSG_DONTWAIT;

		if (timeout) {
			/* *timeout is updated in place to the remaining time. */
			ktime_get_ts(timeout);
			*timeout = timespec_sub(end_time, *timeout);
			if (timeout->tv_sec < 0) {
				timeout->tv_sec = timeout->tv_nsec = 0;
				break;
			}

			/* Timeout, return less than vlen datagrams */
			if (timeout->tv_nsec == 0 && timeout->tv_sec == 0)
				break;
		}

		/* Out of band data, return right away */
		if (msg_sys.msg_flags & MSG_OOB)
			break;
		cond_resched();
	}

	if (err == 0)
		goto out_put;

	if (datagrams == 0) {
		datagrams = err;
		goto out_put;
	}

	/*
	 * We may return less entries than requested (vlen) if the
	 * sock is non block and there aren't enough datagrams...
	 */
	if (err != -EAGAIN) {
		/*
		 * ... or if recvmsg returns an error after we
		 * received some datagrams, where we record the
		 * error to return on the next call or if the
		 * app asks about it using getsockopt(SO_ERROR).
		 */
		sock->sk->sk_err = -err;
	}
out_put:
	fput_light(sock->file, fput_needed);

	return datagrams;
}

SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
		unsigned int, vlen, unsigned int, flags,
		struct timespec __user *, timeout)
{
	int datagrams;
	struct timespec timeout_sys;

	if (flags & MSG_CMSG_COMPAT)
		return -EINVAL;

	if (!timeout)
		return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);

	if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys)))
		return -EFAULT;

	datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);

	/* Only copy the remaining time back on success (see CVE-2016-7117). */
	if (datagrams > 0 &&
	    copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys)))
		datagrams = -EFAULT;

	return datagrams;
}

#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[21] = {
	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
	AL(4), AL(5), AL(4)
};

#undef AL

/*
 *	System call vectors.
 *
 *	Argument checking cleaned up. Saved 20% in size.
 *  This function doesn't need to set the kernel lock because
 *  it is set by the callees.
 */

SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
{
	unsigned long a[AUDITSC_ARGS];
	unsigned long a0, a1;
	int err;
	unsigned int len;

	if (call < 1 || call > SYS_SENDMMSG)
		return -EINVAL;

	/* NOTE(review): nargs[call] after a bounds check is a classic
	 * Spectre-v1 pattern; later kernels use array_index_nospec() here —
	 * confirm against the tree this is backported to. */
	len = nargs[call];
	if (len > sizeof(a))
		return -EINVAL;

	/* copy_from_user should be SMP safe.
	 */
	if (copy_from_user(a, args, len))
		return -EFAULT;

	err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
	if (err)
		return err;

	a0 = a[0];
	a1 = a[1];

	/* Demultiplex to the individual socket syscalls. */
	switch (call) {
	case SYS_SOCKET:
		err = sys_socket(a0, a1, a[2]);
		break;
	case SYS_BIND:
		err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_CONNECT:
		err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
		break;
	case SYS_LISTEN:
		err = sys_listen(a0, a1);
		break;
	case SYS_ACCEPT:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], 0);
		break;
	case SYS_GETSOCKNAME:
		err =
		    sys_getsockname(a0, (struct sockaddr __user *)a1,
				    (int __user *)a[2]);
		break;
	case SYS_GETPEERNAME:
		err =
		    sys_getpeername(a0, (struct sockaddr __user *)a1,
				    (int __user *)a[2]);
		break;
	case SYS_SOCKETPAIR:
		err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
		break;
	case SYS_SEND:
		err = sys_send(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_SENDTO:
		err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
				 (struct sockaddr __user *)a[4], a[5]);
		break;
	case SYS_RECV:
		err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
		break;
	case SYS_RECVFROM:
		err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
				   (struct sockaddr __user *)a[4],
				   (int __user *)a[5]);
		break;
	case SYS_SHUTDOWN:
		err = sys_shutdown(a0, a1);
		break;
	case SYS_SETSOCKOPT:
		err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
		break;
	case SYS_GETSOCKOPT:
		err =
		    sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
				   (int __user *)a[4]);
		break;
	case SYS_SENDMSG:
		err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]);
		break;
	case SYS_SENDMMSG:
		err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
		break;
	case SYS_RECVMSG:
		err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]);
		break;
	case SYS_RECVMMSG:
		err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3],
				   (struct timespec __user *)a[4]);
		break;
	case SYS_ACCEPT4:
		err = sys_accept4(a0, (struct sockaddr __user *)a1,
				  (int __user *)a[2], a[3]);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
#endif				/* __ARCH_WANT_SYS_SOCKETCALL */

/**
 *	sock_register - add a socket protocol handler
 *	@ops: description of protocol
 *
 *	This function is called by a protocol handler that wants to
 *	advertise its address family, and have it linked into the
 *	socket interface. The value ops->family corresponds to the
 *	socket system call protocol family.
 */
int sock_register(const struct net_proto_family *ops)
{
	int err;

	if (ops->family >= NPROTO) {
		pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
		return -ENOBUFS;
	}

	spin_lock(&net_family_lock);
	if (rcu_dereference_protected(net_families[ops->family],
				      lockdep_is_held(&net_family_lock)))
		err = -EEXIST;
	else {
		rcu_assign_pointer(net_families[ops->family], ops);
		err = 0;
	}
	spin_unlock(&net_family_lock);

	pr_info("NET: Registered protocol family %d\n", ops->family);
	return err;
}
EXPORT_SYMBOL(sock_register);

/**
 *	sock_unregister - remove a protocol handler
 *	@family: protocol family to remove
 *
 *	This function is called by a protocol handler that wants to
 *	remove its address family, and have it unlinked from the
 *	new socket creation.
 *
 *	If protocol handler is a module, then it can use module reference
 *	counts to protect against new references. If protocol handler is not
 *	a module then it needs to provide its own protection in
 *	the ops->create routine.
 */
void sock_unregister(int family)
{
	BUG_ON(family < 0 || family >= NPROTO);

	spin_lock(&net_family_lock);
	RCU_INIT_POINTER(net_families[family], NULL);
	spin_unlock(&net_family_lock);

	/* Wait for in-flight RCU readers of net_families[] to finish. */
	synchronize_rcu();

	pr_info("NET: Unregistered protocol family %d\n", family);
}
EXPORT_SYMBOL(sock_unregister);

static int __init sock_init(void)
{
	int err;
	/*
	 *      Initialize the network sysctl infrastructure.
	 */
	err = net_sysctl_init();
	if (err)
		goto out;

	/*
	 *      Initialize skbuff SLAB cache
	 */
	skb_init();

	/*
	 *      Initialize the protocols module.
	 */

	init_inodecache();

	err = register_filesystem(&sock_fs_type);
	if (err)
		goto out_fs;
	sock_mnt = kern_mount(&sock_fs_type);
	if (IS_ERR(sock_mnt)) {
		err = PTR_ERR(sock_mnt);
		goto out_mount;
	}

	/* The real protocol initialization is performed in later initcalls.
	 */

#ifdef CONFIG_NETFILTER
	err = netfilter_init();
	if (err)
		goto out;
#endif

	ptp_classifier_init();

out:
	return err;

out_mount:
	unregister_filesystem(&sock_fs_type);
out_fs:
	goto out;
}

core_initcall(sock_init);	/* early initcall */

#ifdef CONFIG_PROC_FS
/* /proc/net/sockstat helper: show the global count of sockets in use. */
void socket_seq_show(struct seq_file *seq)
{
	int cpu;
	int counter = 0;

	for_each_possible_cpu(cpu)
	    counter += per_cpu(sockets_in_use, cpu);

	/* It can be negative, by the way. 8) */
	if (counter < 0)
		counter = 0;

	seq_printf(seq, "sockets: used %d\n", counter);
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
/* 32-bit SIOCGSTAMP: fetch the timestamp in kernel space, convert out. */
static int do_siocgstamp(struct net *net, struct socket *sock,
			 unsigned int cmd, void __user *up)
{
	mm_segment_t old_fs = get_fs();
	struct timeval ktv;
	int err;

	/* Temporarily lift the user-space limit so the ioctl can write &ktv. */
	set_fs(KERNEL_DS);
	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
	set_fs(old_fs);
	if (!err)
		err = compat_put_timeval(&ktv, up);

	return err;
}

/* 32-bit SIOCGSTAMPNS: same as above with nanosecond resolution. */
static int do_siocgstampns(struct net *net, struct socket *sock,
			   unsigned int cmd, void __user *up)
{
	mm_segment_t old_fs = get_fs();
	struct timespec kts;
	int err;

	set_fs(KERNEL_DS);
	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
	set_fs(old_fs);
	if (!err)
		err = compat_put_timespec(&kts, up);

	return err;
}

/* 32-bit SIOCGIFNAME: bounce the ifreq through a 64-bit user-space copy. */
static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
{
	struct ifreq __user *uifr;
	int err;

	uifr = compat_alloc_user_space(sizeof(struct ifreq));
	if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
		return -EFAULT;

	err = dev_ioctl(net, SIOCGIFNAME, uifr);
	if (err)
		return err;

	if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
		return -EFAULT;

	return 0;
}

/* 32-bit SIOCGIFCONF: translate compat_ifconf to/from the native layout. */
static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
{
	struct compat_ifconf ifc32;
	struct ifconf ifc;
	struct ifconf __user
*uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_SRXCLSRLINS: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. 
*/ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void __user *)(&rxnfc->fs.m_ext + 1) - (void __user *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void __user *)(&rxnfc->fs.location + 1) - (void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void __user *)(&rxnfc->fs.m_ext + 1) - (const void __user *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void __user *)(&rxnfc->fs.location + 1) - (const void __user *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; mm_segment_t old_fs; int err; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; default: return -ENOIOCTLCMD; } } /* Handle ioctls that use ifreq::ifr_data and just need struct ifreq converted */ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int 
dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct 
sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! */ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= get_user(r4.rt_window, &(ur4->rt_window)); ret |= get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); 
r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return compat_ifr_data_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCSHWTSTAMP: case SIOCGHWTSTAMP: return compat_ifr_data_ioctl(net, cmd, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case 
SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } 
EXPORT_SYMBOL(kernel_accept);

/*
 * kernel_connect - connect a kernel-owned socket
 * @sock: socket to connect
 * @addr: remote address
 * @addrlen: size of @addr
 * @flags: file-related flags (e.g. O_NONBLOCK semantics)
 *
 * Thin wrapper dispatching to the protocol's connect op.
 * Returns 0 on success or a negative errno.
 */
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
		   int flags)
{
	return sock->ops->connect(sock, addr, addrlen, flags);
}
EXPORT_SYMBOL(kernel_connect);

/*
 * kernel_getsockname - get the local address of a kernel socket
 *
 * The trailing 0 selects the local ("sock") name in the protocol's
 * getname op.  Returns 0 on success or a negative errno.
 */
int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
		       int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 0);
}
EXPORT_SYMBOL(kernel_getsockname);

/*
 * kernel_getpeername - get the remote address of a kernel socket
 *
 * The trailing 1 selects the peer name in the protocol's getname op.
 * Returns 0 on success or a negative errno.
 */
int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
		       int *addrlen)
{
	return sock->ops->getname(sock, addr, addrlen, 1);
}
EXPORT_SYMBOL(kernel_getpeername);

/*
 * kernel_getsockopt - getsockopt() for kernel-space callers
 * @sock: socket to query
 * @level: option level (SOL_SOCKET or protocol level)
 * @optname: option to read
 * @optval: kernel buffer receiving the option value
 * @optlen: in/out size of @optval
 *
 * The sockopt paths expect __user pointers, so the kernel pointers are
 * force-cast and the address limit is temporarily widened with
 * set_fs(KERNEL_DS); the get_fs()/set_fs() pair must bracket the call
 * exactly and the old limit is always restored before returning.
 */
int kernel_getsockopt(struct socket *sock, int level, int optname,
			char *optval, int *optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int __user *uoptlen;
	int err;

	/* kernel pointers masquerading as user pointers for the sockopt API */
	uoptval = (char __user __force *) optval;
	uoptlen = (int __user __force *) optlen;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
	else
		err = sock->ops->getsockopt(sock, level, optname, uoptval,
					    uoptlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_getsockopt);

/*
 * kernel_setsockopt - setsockopt() for kernel-space callers
 *
 * Same KERNEL_DS address-limit dance as kernel_getsockopt(); SOL_SOCKET
 * options go through the generic sock_setsockopt(), everything else
 * through the protocol's setsockopt op.
 */
int kernel_setsockopt(struct socket *sock, int level, int optname,
			char *optval, unsigned int optlen)
{
	mm_segment_t oldfs = get_fs();
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *) optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_setsockopt);

/*
 * kernel_sendpage - zero-copy page send for kernel callers
 *
 * Uses the protocol's sendpage op when available, otherwise falls back
 * to the copying sock_no_sendpage() implementation.
 */
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
		    size_t size, int flags)
{
	if (sock->ops->sendpage)
		return sock->ops->sendpage(sock, page, offset, size, flags);

	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(kernel_sendpage);

/*
 * kernel_sock_ioctl - socket ioctl for kernel-space callers
 *
 * Widens the address limit so ioctl handlers that copy to/from "user"
 * memory accept the kernel-space @arg, then restores it.
 */
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
	mm_segment_t oldfs = get_fs();
	int err;

	set_fs(KERNEL_DS);
	err = sock->ops->ioctl(sock, cmd, arg);
	set_fs(oldfs);
	return err;
}
EXPORT_SYMBOL(kernel_sock_ioctl);

/*
 * kernel_sock_shutdown - shut down part of a full-duplex connection
 * @sock: socket to shut down
 * @how: SHUT_RD, SHUT_WR or SHUT_RDWR selector
 *
 * Thin wrapper dispatching to the protocol's shutdown op.
 * Returns 0 on success or a negative errno.
 */
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
	return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
./CrossVul/dataset_final_sorted/CWE-19/c/good_5253_0
crossvul-cpp_data_bad_4935_0
/* * Copyright (C) 2005-2010 IBM Corporation * * Author: * Mimi Zohar <zohar@us.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * File: evm_main.c * implements evm_inode_setxattr, evm_inode_post_setxattr, * evm_inode_removexattr, and evm_verifyxattr */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/crypto.h> #include <linux/audit.h> #include <linux/xattr.h> #include <linux/integrity.h> #include <linux/evm.h> #include <crypto/hash.h> #include "evm.h" int evm_initialized; static char *integrity_status_msg[] = { "pass", "fail", "no_label", "no_xattrs", "unknown" }; char *evm_hmac = "hmac(sha1)"; char *evm_hash = "sha1"; int evm_hmac_attrs; char *evm_config_xattrnames[] = { #ifdef CONFIG_SECURITY_SELINUX XATTR_NAME_SELINUX, #endif #ifdef CONFIG_SECURITY_SMACK XATTR_NAME_SMACK, #ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS XATTR_NAME_SMACKEXEC, XATTR_NAME_SMACKTRANSMUTE, XATTR_NAME_SMACKMMAP, #endif #endif #ifdef CONFIG_IMA_APPRAISE XATTR_NAME_IMA, #endif XATTR_NAME_CAPS, NULL }; static int evm_fixmode; static int __init evm_set_fixmode(char *str) { if (strncmp(str, "fix", 3) == 0) evm_fixmode = 1; return 0; } __setup("evm=", evm_set_fixmode); static void __init evm_init_config(void) { #ifdef CONFIG_EVM_ATTR_FSUUID evm_hmac_attrs |= EVM_ATTR_FSUUID; #endif pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs); } static int evm_find_protected_xattrs(struct dentry *dentry) { struct inode *inode = d_backing_inode(dentry); char **xattr; int error; int count = 0; if (!inode->i_op->getxattr) return -EOPNOTSUPP; for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) { error = inode->i_op->getxattr(dentry, *xattr, NULL, 0); if (error < 0) { if (error == -ENODATA) continue; return error; } count++; } return count; } /* * evm_verify_hmac - calculate and 
compare the HMAC with the EVM xattr * * Compute the HMAC on the dentry's protected set of extended attributes * and compare it against the stored security.evm xattr. * * For performance: * - use the previoulsy retrieved xattr value and length to calculate the * HMAC.) * - cache the verification result in the iint, when available. * * Returns integrity status */ static enum integrity_status evm_verify_hmac(struct dentry *dentry, const char *xattr_name, char *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint) { struct evm_ima_xattr_data *xattr_data = NULL; struct evm_ima_xattr_data calc; enum integrity_status evm_status = INTEGRITY_PASS; int rc, xattr_len; if (iint && iint->evm_status == INTEGRITY_PASS) return iint->evm_status; /* if status is not PASS, try to check again - against -ENOMEM */ /* first need to know the sig type */ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0, GFP_NOFS); if (rc <= 0) { evm_status = INTEGRITY_FAIL; if (rc == -ENODATA) { rc = evm_find_protected_xattrs(dentry); if (rc > 0) evm_status = INTEGRITY_NOLABEL; else if (rc == 0) evm_status = INTEGRITY_NOXATTRS; /* new file */ } else if (rc == -EOPNOTSUPP) { evm_status = INTEGRITY_UNKNOWN; } goto out; } xattr_len = rc; /* check value type */ switch (xattr_data->type) { case EVM_XATTR_HMAC: rc = evm_calc_hmac(dentry, xattr_name, xattr_value, xattr_value_len, calc.digest); if (rc) break; rc = memcmp(xattr_data->digest, calc.digest, sizeof(calc.digest)); if (rc) rc = -EINVAL; break; case EVM_IMA_XATTR_DIGSIG: rc = evm_calc_hash(dentry, xattr_name, xattr_value, xattr_value_len, calc.digest); if (rc) break; rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, (const char *)xattr_data, xattr_len, calc.digest, sizeof(calc.digest)); if (!rc) { /* Replace RSA with HMAC if not mounted readonly and * not immutable */ if (!IS_RDONLY(d_backing_inode(dentry)) && !IS_IMMUTABLE(d_backing_inode(dentry))) evm_update_evmxattr(dentry, xattr_name, xattr_value, 
xattr_value_len); } break; default: rc = -EINVAL; break; } if (rc) evm_status = (rc == -ENODATA) ? INTEGRITY_NOXATTRS : INTEGRITY_FAIL; out: if (iint) iint->evm_status = evm_status; kfree(xattr_data); return evm_status; } static int evm_protected_xattr(const char *req_xattr_name) { char **xattrname; int namelen; int found = 0; namelen = strlen(req_xattr_name); for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { if ((strlen(*xattrname) == namelen) && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) { found = 1; break; } if (strncmp(req_xattr_name, *xattrname + XATTR_SECURITY_PREFIX_LEN, strlen(req_xattr_name)) == 0) { found = 1; break; } } return found; } /** * evm_verifyxattr - verify the integrity of the requested xattr * @dentry: object of the verify xattr * @xattr_name: requested xattr * @xattr_value: requested xattr value * @xattr_value_len: requested xattr value length * * Calculate the HMAC for the given dentry and verify it against the stored * security.evm xattr. For performance, use the xattr value and length * previously retrieved to calculate the HMAC. * * Returns the xattr integrity status. * * This function requires the caller to lock the inode's i_mutex before it * is executed. */ enum integrity_status evm_verifyxattr(struct dentry *dentry, const char *xattr_name, void *xattr_value, size_t xattr_value_len, struct integrity_iint_cache *iint) { if (!evm_initialized || !evm_protected_xattr(xattr_name)) return INTEGRITY_UNKNOWN; if (!iint) { iint = integrity_iint_find(d_backing_inode(dentry)); if (!iint) return INTEGRITY_UNKNOWN; } return evm_verify_hmac(dentry, xattr_name, xattr_value, xattr_value_len, iint); } EXPORT_SYMBOL_GPL(evm_verifyxattr); /* * evm_verify_current_integrity - verify the dentry's metadata integrity * @dentry: pointer to the affected dentry * * Verify and return the dentry's metadata integrity. The exceptions are * before EVM is initialized or in 'fix' mode. 
*/ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry) { struct inode *inode = d_backing_inode(dentry); if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode) return 0; return evm_verify_hmac(dentry, NULL, NULL, 0, NULL); } /* * evm_protect_xattr - protect the EVM extended attribute * * Prevent security.evm from being modified or removed without the * necessary permissions or when the existing value is invalid. * * The posix xattr acls are 'system' prefixed, which normally would not * affect security.evm. An interesting side affect of writing posix xattr * acls is their modifying of the i_mode, which is included in security.evm. * For posix xattr acls only, permit security.evm, even if it currently * doesn't exist, to be updated. */ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { enum integrity_status evm_status; if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; } else if (!evm_protected_xattr(xattr_name)) { if (!posix_xattr_acl(xattr_name)) return 0; evm_status = evm_verify_current_integrity(dentry); if ((evm_status == INTEGRITY_PASS) || (evm_status == INTEGRITY_NOXATTRS)) return 0; goto out; } evm_status = evm_verify_current_integrity(dentry); if (evm_status == INTEGRITY_NOXATTRS) { struct integrity_iint_cache *iint; iint = integrity_iint_find(d_backing_inode(dentry)); if (iint && (iint->flags & IMA_NEW_FILE)) return 0; /* exception for pseudo filesystems */ if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC) return 0; integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode, dentry->d_name.name, "update_metadata", integrity_status_msg[evm_status], -EPERM, 0); } out: if (evm_status != INTEGRITY_PASS) integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry), dentry->d_name.name, "appraise_metadata", integrity_status_msg[evm_status], -EPERM, 0); 
return evm_status == INTEGRITY_PASS ? 0 : -EPERM; } /** * evm_inode_setxattr - protect the EVM extended attribute * @dentry: pointer to the affected dentry * @xattr_name: pointer to the affected extended attribute name * @xattr_value: pointer to the new extended attribute value * @xattr_value_len: pointer to the new extended attribute value length * * Before allowing the 'security.evm' protected xattr to be updated, * verify the existing value is valid. As only the kernel should have * access to the EVM encrypted key needed to calculate the HMAC, prevent * userspace from writing HMAC value. Writing 'security.evm' requires * requires CAP_SYS_ADMIN privileges. */ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { const struct evm_ima_xattr_data *xattr_data = xattr_value; if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { if (!xattr_value_len) return -EINVAL; if (xattr_data->type != EVM_IMA_XATTR_DIGSIG) return -EPERM; } return evm_protect_xattr(dentry, xattr_name, xattr_value, xattr_value_len); } /** * evm_inode_removexattr - protect the EVM extended attribute * @dentry: pointer to the affected dentry * @xattr_name: pointer to the affected extended attribute name * * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that * the current value is valid. 
*/ int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name) { return evm_protect_xattr(dentry, xattr_name, NULL, 0); } static void evm_reset_status(struct inode *inode) { struct integrity_iint_cache *iint; iint = integrity_iint_find(inode); if (iint) iint->evm_status = INTEGRITY_UNKNOWN; } /** * evm_inode_post_setxattr - update 'security.evm' to reflect the changes * @dentry: pointer to the affected dentry * @xattr_name: pointer to the affected extended attribute name * @xattr_value: pointer to the new extended attribute value * @xattr_value_len: pointer to the new extended attribute value length * * Update the HMAC stored in 'security.evm' to reflect the change. * * No need to take the i_mutex lock here, as this function is called from * __vfs_setxattr_noperm(). The caller of which has taken the inode's * i_mutex lock. */ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { if (!evm_initialized || (!evm_protected_xattr(xattr_name) && !posix_xattr_acl(xattr_name))) return; evm_reset_status(dentry->d_inode); evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len); } /** * evm_inode_post_removexattr - update 'security.evm' after removing the xattr * @dentry: pointer to the affected dentry * @xattr_name: pointer to the affected extended attribute name * * Update the HMAC stored in 'security.evm' to reflect removal of the xattr. * * No need to take the i_mutex lock here, as this function is called from * vfs_removexattr() which takes the i_mutex. 
*/ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name) { if (!evm_initialized || !evm_protected_xattr(xattr_name)) return; evm_reset_status(dentry->d_inode); evm_update_evmxattr(dentry, xattr_name, NULL, 0); } /** * evm_inode_setattr - prevent updating an invalid EVM extended attribute * @dentry: pointer to the affected dentry */ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; enum integrity_status evm_status; if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))) return 0; evm_status = evm_verify_current_integrity(dentry); if ((evm_status == INTEGRITY_PASS) || (evm_status == INTEGRITY_NOXATTRS)) return 0; integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry), dentry->d_name.name, "appraise_metadata", integrity_status_msg[evm_status], -EPERM, 0); return -EPERM; } /** * evm_inode_post_setattr - update 'security.evm' after modifying metadata * @dentry: pointer to the affected dentry * @ia_valid: for the UID and GID status * * For now, update the HMAC stored in 'security.evm' to reflect UID/GID * changes. * * This function is called from notify_change(), which expects the caller * to lock the inode's i_mutex. 
*/ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) { if (!evm_initialized) return; if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) evm_update_evmxattr(dentry, NULL, NULL, 0); } /* * evm_inode_init_security - initializes security.evm */ int evm_inode_init_security(struct inode *inode, const struct xattr *lsm_xattr, struct xattr *evm_xattr) { struct evm_ima_xattr_data *xattr_data; int rc; if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name)) return 0; xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS); if (!xattr_data) return -ENOMEM; xattr_data->type = EVM_XATTR_HMAC; rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest); if (rc < 0) goto out; evm_xattr->value = xattr_data; evm_xattr->value_len = sizeof(*xattr_data); evm_xattr->name = XATTR_EVM_SUFFIX; return 0; out: kfree(xattr_data); return rc; } EXPORT_SYMBOL_GPL(evm_inode_init_security); #ifdef CONFIG_EVM_LOAD_X509 void __init evm_load_x509(void) { int rc; rc = integrity_load_x509(INTEGRITY_KEYRING_EVM, CONFIG_EVM_X509_PATH); if (!rc) evm_initialized |= EVM_INIT_X509; } #endif static int __init init_evm(void) { int error; evm_init_config(); error = integrity_init_keyring(INTEGRITY_KEYRING_EVM); if (error) return error; error = evm_init_secfs(); if (error < 0) { pr_info("Error registering secfs\n"); return error; } return 0; } /* * evm_display_config - list the EVM protected security extended attributes */ static int __init evm_display_config(void) { char **xattrname; for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) pr_info("%s\n", *xattrname); return 0; } pure_initcall(evm_display_config); late_initcall(init_evm); MODULE_DESCRIPTION("Extended Verification Module"); MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-19/c/bad_4935_0
crossvul-cpp_data_bad_4855_1
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if 
(TIFFSetDirectory (tif, nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if 
(!processCompressOptions(optarg)) usage(); break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) 
TIFFClose(in); (void) TIFFClose(out); return (1); } if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { 
"usage: tiffcp [options] input... output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB 
data with 50% comp. quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: 
if (count == 1) { double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, 
uint16, uint16); /* PODD */ static int tiffcp(TIFF* in, TIFF* out) { uint16 bitspersample, samplesperpixel = 1; uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK; copyFunc cf; uint32 width, length; struct cpTag* p; CopyField(TIFFTAG_IMAGEWIDTH, width); CopyField(TIFFTAG_IMAGELENGTH, length); CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample); CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (compression != (uint16)-1) TIFFSetField(out, TIFFTAG_COMPRESSION, compression); else CopyField(TIFFTAG_COMPRESSION, compression); TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression); TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric); if (input_compression == COMPRESSION_JPEG) { /* Force conversion to RGB */ TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else if (input_photometric == PHOTOMETRIC_YCBCR) { /* Otherwise, can't handle subsampled input */ uint16 subsamplinghor,subsamplingver; TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING, &subsamplinghor, &subsamplingver); if (subsamplinghor!=1 || subsamplingver!=1) { fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n", TIFFFileName(in)); return FALSE; } } if (compression == COMPRESSION_JPEG) { if (input_photometric == PHOTOMETRIC_RGB && jpegcolormode == JPEGCOLORMODE_RGB) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); else TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric); } else if (compression == COMPRESSION_SGILOG || compression == COMPRESSION_SGILOG24) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, samplesperpixel == 1 ? 
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV); else if (input_compression == COMPRESSION_JPEG && samplesperpixel == 3 ) { /* RGB conversion was forced above hence the output will be of the same type */ TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); } else CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT); if (fillorder != 0) TIFFSetField(out, TIFFTAG_FILLORDER, fillorder); else CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT); /* * Will copy `Orientation' tag from input image */ TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation); switch (orientation) { case ORIENTATION_BOTRIGHT: case ORIENTATION_RIGHTBOT: /* XXX */ TIFFWarning(TIFFFileName(in), "using bottom-left orientation"); orientation = ORIENTATION_BOTLEFT; /* fall thru... */ case ORIENTATION_LEFTBOT: /* XXX */ case ORIENTATION_BOTLEFT: break; case ORIENTATION_TOPRIGHT: case ORIENTATION_RIGHTTOP: /* XXX */ default: TIFFWarning(TIFFFileName(in), "using top-left orientation"); orientation = ORIENTATION_TOPLEFT; /* fall thru... */ case ORIENTATION_LEFTTOP: /* XXX */ case ORIENTATION_TOPLEFT: break; } TIFFSetField(out, TIFFTAG_ORIENTATION, orientation); /* * Choose tiles/strip for the output image according to * the command line arguments (-tiles, -strips) and the * structure of the input image. */ if (outtiled == -1) outtiled = TIFFIsTiled(in); if (outtiled) { /* * Setup output file's tile width&height. If either * is not specified, use either the value from the * input image or, if nothing is defined, use the * library default. */ if (tilewidth == (uint32) -1) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth); if (tilelength == (uint32) -1) TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength); TIFFDefaultTileSize(out, &tilewidth, &tilelength); TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth); TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength); } else { /* * RowsPerStrip is left unspecified: use either the * value from the input image or, if nothing is defined, * use the library default. 
*/ if (rowsperstrip == (uint32) 0) { if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &rowsperstrip)) { rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip); } if (rowsperstrip > length && rowsperstrip != (uint32)-1) rowsperstrip = length; } else if (rowsperstrip == (uint32) -1) rowsperstrip = length; TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip); } if (config != (uint16) -1) TIFFSetField(out, TIFFTAG_PLANARCONFIG, config); else CopyField(TIFFTAG_PLANARCONFIG, config); if (samplesperpixel <= 4) CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT); CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT); /* SMinSampleValue & SMaxSampleValue */ switch (compression) { case COMPRESSION_JPEG: TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality); TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode); break; case COMPRESSION_JBIG: CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII); break; case COMPRESSION_LZW: case COMPRESSION_ADOBE_DEFLATE: case COMPRESSION_DEFLATE: case COMPRESSION_LZMA: if (predictor != (uint16)-1) TIFFSetField(out, TIFFTAG_PREDICTOR, predictor); else CopyField(TIFFTAG_PREDICTOR, predictor); if (preset != -1) { if (compression == COMPRESSION_ADOBE_DEFLATE || compression == COMPRESSION_DEFLATE) TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset); else if (compression == COMPRESSION_LZMA) TIFFSetField(out, TIFFTAG_LZMAPRESET, preset); } break; case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: if (compression == COMPRESSION_CCITTFAX3) { if (g3opts != (uint32) -1) TIFFSetField(out, TIFFTAG_GROUP3OPTIONS, g3opts); else CopyField(TIFFTAG_GROUP3OPTIONS, g3opts); } else CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG); CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG); CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); 
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); break; } { uint32 len32; void** data; if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data)) TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data); } { uint16 ninks; const char* inknames; if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) { TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks); if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) { int inknameslen = strlen(inknames) + 1; const char* cp = inknames; while (ninks > 1) { cp = strchr(cp, '\0'); cp++; inknameslen += (strlen(cp) + 1); ninks--; } TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames); } } } { unsigned short pg0, pg1; if (pageInSeq == 1) { if (pageNum < 0) /* only one input file */ { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); } else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } else { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) { if (pageNum < 0) /* only one input file */ TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } } } for (p = tags; p < &tags[NTAGS]; p++) CopyTag(p->tag, p->count, p->type); cf = pickCopyFunc(in, out, bitspersample, samplesperpixel); return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE); } /* * Copy Functions. */ #define DECLAREcpFunc(x) \ static int x(TIFF* in, TIFF* out, \ uint32 imagelength, uint32 imagewidth, tsample_t spp) #define DECLAREreadFunc(x) \ static int x(TIFF* in, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); #define DECLAREwriteFunc(x) \ static int x(TIFF* out, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); /* * Contig -> contig by scanline for rows/strip change. 
 */
/*
 * Copy a contiguous image scanline-by-scanline; used when only the
 * strip layout (rows/strip) changes between input and output.
 * Returns 1 on success, 0 on failure (buffer already freed).
 */
DECLAREcpFunc(cpContig2ContigByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;

	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	(void) imagewidth; (void) spp;
	for (row = 0; row < imagelength; row++) {
		/* Read errors abort the copy unless -i (ignore) was given. */
		if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) {
			TIFFError(TIFFFileName(in),
				  "Error, can't read scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
		if (TIFFWriteScanline(out, buf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
				  "Error, can't write scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/* Signature of a per-scanline bias-subtraction routine. */
typedef void biasFn (void *image, void *bias, uint32 pixels);

/*
 * Generate clamped (saturating at 0) per-pixel subtraction routines for
 * 8-, 16- and 32-bit unsigned samples.  Comments cannot go inside the
 * macro body, hence this note lives here.
 */
#define subtract(bits) \
static void subtract##bits (void *i, void *b, uint32 pixels)\
{\
   uint##bits *image = i;\
   uint##bits *bias = b;\
   while (pixels--) {\
     *image = *image > *bias ? *image-*bias : 0;\
     image++, bias++; \
   } \
}

subtract(8)
subtract(16)
subtract(32)

/* Select the subtraction routine matching the sample depth, or NULL. */
static biasFn *lineSubtractFn (unsigned bits)
{
	switch (bits) {
		case 8: return subtract8;
		case 16: return subtract16;
		case 32: return subtract32;
	}
	return NULL;
}

/*
 * Contig -> contig by scanline while subtracting a bias image.
*/ DECLAREcpFunc(cpBiasedContig2Contig) { if (spp == 1) { tsize_t biasSize = TIFFScanlineSize(bias); tsize_t bufSize = TIFFScanlineSize(in); tdata_t buf, biasBuf; uint32 biasWidth = 0, biasLength = 0; TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth); TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength); if (biasSize == bufSize && imagelength == biasLength && imagewidth == biasWidth) { uint16 sampleBits = 0; biasFn *subtractLine; TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits); subtractLine = lineSubtractFn (sampleBits); if (subtractLine) { uint32 row; buf = _TIFFmalloc(bufSize); biasBuf = _TIFFmalloc(bufSize); for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFReadScanline(bias, biasBuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read biased scanline %lu", (unsigned long) row); goto bad; } subtractLine (buf, biasBuf, imagewidth); if (TIFFWriteScanline(out, buf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } _TIFFfree(buf); _TIFFfree(biasBuf); TIFFSetDirectory(bias, TIFFCurrentDirectory(bias)); /* rewind */ return 1; bad: _TIFFfree(buf); _TIFFfree(biasBuf); return 0; } else { TIFFError(TIFFFileName(in), "No support for biasing %d bit pixels\n", sampleBits); return 0; } } TIFFError(TIFFFileName(in), "Bias image %s,%d\nis not the same size as %s,%d\n", TIFFFileName(bias), TIFFCurrentDirectory(bias), TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } else { TIFFError(TIFFFileName(in), "Can't bias %s,%d as it has >1 Sample/Pixel\n", TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } } /* * Strip -> strip for change in encoding. 
 */
/*
 * Re-encode strip data: decode each input strip and re-encode it for
 * the output (used when compression/encoding changes but layout does
 * not).  Returns 1 on success, 0 on failure.
 */
DECLAREcpFunc(cpDecodedStrips)
{
	tsize_t stripsize  = TIFFStripSize(in);
	tdata_t buf = _TIFFmalloc(stripsize);

	(void) imagewidth; (void) spp;
	if (buf) {
		tstrip_t s, ns = TIFFNumberOfStrips(in);
		uint32 row = 0;
		_TIFFmemset(buf, 0, stripsize);
		for (s = 0; s < ns; s++) {
			/* Last strip may be shorter than rowsperstrip. */
			tsize_t cc = (row + rowsperstrip > imagelength) ?
			    TIFFVStripSize(in, imagelength - row) : stripsize;
			if (TIFFReadEncodedStrip(in, s, buf, cc) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			row += rowsperstrip;
		}
		_TIFFfree(buf);
		return 1;
	} else {
		TIFFError(TIFFFileName(in),
		    "Error, can't allocate memory buffer of size %lu "
		    "to read strips", (unsigned long) stripsize);
		return 0;
	}

bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Separate -> separate by row for rows/strip change.
 * Copies each sample plane scanline-by-scanline.
 */
DECLAREcpFunc(cpSeparate2SeparateByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;
	tsample_t s;

	(void) imagewidth;
	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	for (s = 0; s < spp; s++) {
		for (row = 0; row < imagelength; row++) {
			if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
			if (TIFFWriteScanline(out, buf, row, s) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Contig -> separate by row.
*/ DECLAREcpFunc(cpContig2SeparateByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); /* unpack channels */ for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, inbuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = ((uint8*)inbuf) + s; outp = (uint8*)outbuf; for (n = imagewidth; n-- > 0;) { *outp++ = *inp; inp += spp; } if (TIFFWriteScanline(out, outbuf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } /* * Separate -> contig by row. 
*/ DECLAREcpFunc(cpSeparate2ContigByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { if (TIFFReadScanline(in, inbuf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = (uint8*)inbuf; outp = ((uint8*)outbuf) + s; for (n = imagewidth; n-- > 0;) { *outp = *inp++; outp += spp; } } if (TIFFWriteScanline(out, outbuf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } static void cpStripToTile(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) *out++ = *in++; out += outskew; in += inskew; } } static void cpContigBufToSeparateBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample ) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } in += (spp-1) * bytes_per_sample; } out += outskew; in += inskew; } } static void cpSeparateBufToContigBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } out += (spp-1)*bytes_per_sample; } out += outskew; in += inskew; } } static int cpImage(TIFF* in, TIFF* 
out, readFunc fin, writeFunc fout,
    uint32 imagelength, uint32 imagewidth, tsample_t spp)
{
	int status = 0;
	tdata_t buf = NULL;
	tsize_t scanlinesize = TIFFRasterScanlineSize(in);
	tsize_t bytes = scanlinesize * (tsize_t)imagelength;
	/*
	 * XXX: Check for integer overflow.
	 */
	if (scanlinesize
	    && imagelength
	    && bytes / (tsize_t)imagelength == scanlinesize) {
		buf = _TIFFmalloc(bytes);
		if (buf) {
			/* Read the whole image, then write it out. */
			if ((*fin)(in, (uint8*)buf, imagelength, imagewidth,
			    spp)) {
				status = (*fout)(out, (uint8*)buf,
				    imagelength, imagewidth, spp);
			}
			_TIFFfree(buf);
		} else {
			TIFFError(TIFFFileName(in),
			    "Error, can't allocate space for image buffer");
		}
	} else {
		TIFFError(TIFFFileName(in), "Error, no space for image buffer");
	}

	return status;
}

/*
 * Read a contiguous strip-organized image into buf, one scanline per
 * TIFFScanlineSize(in) bytes.  Returns 1 on success, 0 on failure.
 */
DECLAREreadFunc(readContigStripsIntoBuffer)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	uint8* bufp = buf;
	uint32 row;

	(void) imagewidth; (void) spp;
	for (row = 0; row < imagelength; row++) {
		if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0
		    && !ignore) {
			TIFFError(TIFFFileName(in),
			    "Error, can't read scanline %lu",
			    (unsigned long) row);
			return 0;
		}
		bufp += scanlinesize;
	}

	return 1;
}

/*
 * Read a separate-plane strip image into buf, interleaving the planes
 * into contiguous pixels.  Returns 1 on success, 0 on failure.
 */
DECLAREreadFunc(readSeparateStripsIntoBuffer)
{
	int status = 1;
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t scanline;
	if (!scanlinesize)
		return 0;

	scanline = _TIFFmalloc(scanlinesize);
	if (!scanline)
		return 0;
	_TIFFmemset(scanline, 0, scanlinesize);
	(void) imagewidth;
	/* NOTE(review): this check is redundant — scanline was verified
	 * non-NULL just above. */
	if (scanline) {
		uint8* bufp = (uint8*) buf;
		uint32 row;
		tsample_t s;
		for (row = 0; row < imagelength; row++) {
			/* merge channels */
			for (s = 0; s < spp; s++) {
				uint8* bp = bufp + s;
				tsize_t n = scanlinesize;
				uint8* sbuf = scanline;

				if (TIFFReadScanline(in, scanline, row, s) < 0
				    && !ignore) {
					TIFFError(TIFFFileName(in),
					    "Error, can't read scanline %lu",
					    (unsigned long) row);
					status = 0;
					goto done;
				}
				while (n-- > 0)
					*bp = *sbuf++, bp += spp;
			}
			bufp += scanlinesize * spp;
		}
	}

done:
	_TIFFfree(scanline);
	return status;
}

DECLAREreadFunc(readContigTilesIntoBuffer)
{
	int status = 1;
	tsize_t tilesize =
TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb + tilew > imagew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREreadFunc(readSeparateTilesIntoBuffer) { int status = 1; uint32 imagew = TIFFRasterScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew*spp; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; uint16 bps, bytes_per_sample; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? 
imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu, " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); status = 0; goto done; } /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew*spp > imagew) { uint32 width = imagew - colb; int oskew = tilew*spp - width; cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, width/(spp*bytes_per_sample), oskew + iskew, oskew/spp, spp, bytes_per_sample); } else cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, tw, iskew, 0, spp, bytes_per_sample); } colb += tilew*spp; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREwriteFunc(writeBufferToContigStrips) { uint32 row, rowsperstrip; tstrip_t strip = 0; (void) imagewidth; (void) spp; (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); return 0; } buf += stripsize; } return 1; } DECLAREwriteFunc(writeBufferToSeparateStrips) { uint32 rowsize = imagewidth * spp; uint32 rowsperstrip; tsize_t stripsize = TIFFStripSize(out); tdata_t obuf; tstrip_t strip = 0; tsample_t s; obuf = _TIFFmalloc(stripsize); if (obuf == NULL) return (0); _TIFFmemset(obuf, 0, stripsize); (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (s = 0; s < spp; s++) { uint32 row; for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? 
imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); cpContigBufToSeparateBuf( obuf, (uint8*) buf + row*rowsize + s, nrows, imagewidth, 0, 0, spp, 1); if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); _TIFFfree(obuf); return 0; } } } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToSeparateTiles) { uint32 imagew = TIFFScanlineSize(out); tsize_t tilew = TIFFTileRowSize(out); uint32 iimagew = TIFFRasterScanlineSize(out); int iskew = iimagew - tilew*spp; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; uint16 bps, bytes_per_sample; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps); assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = (imagew - colb); int oskew = tilew - width; cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, width/bytes_per_sample, oskew, (oskew*spp)+iskew, spp, bytes_per_sample); } else cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, tilewidth, 0, iskew, spp, bytes_per_sample); if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); _TIFFfree(obuf); return 0; } } colb += tilew; } bufp += nrow * iimagew; } _TIFFfree(obuf); return 1; } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigStrips2ContigTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig strips -> separate tiles. */ DECLAREcpFunc(cpContigStrips2SeparateTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate strips -> contig tiles. */ DECLAREcpFunc(cpSeparateStrips2ContigTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate strips -> separate tiles. */ DECLAREcpFunc(cpSeparateStrips2SeparateTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigTiles2ContigTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> separate tiles. */ DECLAREcpFunc(cpContigTiles2SeparateTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> contig tiles. 
*/ DECLAREcpFunc(cpSeparateTiles2ContigTiles) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> separate tiles (tile dimension change). */ DECLAREcpFunc(cpSeparateTiles2SeparateTiles) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> contig tiles (tile dimension change). */ DECLAREcpFunc(cpContigTiles2ContigStrips) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigStrips, imagelength, imagewidth, spp); } /* * Contig tiles -> separate strips. */ DECLAREcpFunc(cpContigTiles2SeparateStrips) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateStrips, imagelength, imagewidth, spp); } /* * Separate tiles -> contig strips. */ DECLAREcpFunc(cpSeparateTiles2ContigStrips) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToContigStrips, imagelength, imagewidth, spp); } /* * Separate tiles -> separate strips. */ DECLAREcpFunc(cpSeparateTiles2SeparateStrips) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToSeparateStrips, imagelength, imagewidth, spp); } /* * Select the appropriate copy function to use. 
*/ static copyFunc pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel) { uint16 shortv; uint32 w, l, tw, tl; int bychunk; (void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv); if (shortv != config && bitspersample != 8 && samplesperpixel > 1) { fprintf(stderr, "%s: Cannot handle different planar configuration w/ bits/sample != 8\n", TIFFFileName(in)); return (NULL); } TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l); if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) { uint32 irps = (uint32) -1L; TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps); /* if biased, force decoded copying to allow image subtraction */ bychunk = !bias && (rowsperstrip == irps); }else{ /* either in or out is tiled */ if (bias) { fprintf(stderr, "%s: Cannot handle tiled configuration w/bias image\n", TIFFFileName(in)); return (NULL); } if (TIFFIsTiled(out)) { if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw)) tw = w; if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl)) tl = l; bychunk = (tw == tilewidth && tl == tilelength); } else { /* out's not, so in must be tiled */ TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); bychunk = (tw == w && tl == rowsperstrip); } } #define T 1 #define F 0 #define pack(a,b,c,d,e) ((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e))) switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) { /* Strips -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T): return cpContigStrips2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T): return cpContigStrips2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T): return cpSeparateStrips2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F): case 
pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T): return cpSeparateStrips2SeparateTiles; /* Tiles -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T): return cpContigTiles2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T): return cpContigTiles2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T): return cpSeparateTiles2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T): return cpSeparateTiles2SeparateTiles; /* Tiles -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T): return cpContigTiles2ContigStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T): return cpContigTiles2SeparateStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T): return cpSeparateTiles2ContigStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T): return cpSeparateTiles2SeparateStrips; /* Strips -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F): return bias ? 
cpBiasedContig2Contig : cpContig2ContigByRow; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T): return cpDecodedStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T): return cpContig2SeparateByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T): return cpSeparate2ContigByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T): return cpSeparate2SeparateByRow; } #undef pack #undef F #undef T fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n", TIFFFileName(in)); return (NULL); } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
/*
 * NOTE(review): dataset artifact -- the text above is the end of one copy of
 * tiffcp.c ("bad_4855_1"); a second copy ("good_4855_1") follows below.
 * These marker lines were file-path residue from the concatenation, not code.
 */
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if 
(TIFFSetDirectory (tif, nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if 
(!processCompressOptions(optarg)) usage(); break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) 
TIFFClose(in); (void) TIFFClose(out); return (1); } if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { 
"usage: tiffcp [options] input... output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB 
data with 50% comp. quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: 
if (count == 1) { double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, 
uint16, uint16); /* PODD */ static int tiffcp(TIFF* in, TIFF* out) { uint16 bitspersample, samplesperpixel = 1; uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK; copyFunc cf; uint32 width, length; struct cpTag* p; CopyField(TIFFTAG_IMAGEWIDTH, width); CopyField(TIFFTAG_IMAGELENGTH, length); CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample); CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (compression != (uint16)-1) TIFFSetField(out, TIFFTAG_COMPRESSION, compression); else CopyField(TIFFTAG_COMPRESSION, compression); TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression); TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric); if (input_compression == COMPRESSION_JPEG) { /* Force conversion to RGB */ TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else if (input_photometric == PHOTOMETRIC_YCBCR) { /* Otherwise, can't handle subsampled input */ uint16 subsamplinghor,subsamplingver; TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING, &subsamplinghor, &subsamplingver); if (subsamplinghor!=1 || subsamplingver!=1) { fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n", TIFFFileName(in)); return FALSE; } } if (compression == COMPRESSION_JPEG) { if (input_photometric == PHOTOMETRIC_RGB && jpegcolormode == JPEGCOLORMODE_RGB) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); else TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric); } else if (compression == COMPRESSION_SGILOG || compression == COMPRESSION_SGILOG24) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, samplesperpixel == 1 ? 
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV); else if (input_compression == COMPRESSION_JPEG && samplesperpixel == 3 ) { /* RGB conversion was forced above hence the output will be of the same type */ TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); } else CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT); if (fillorder != 0) TIFFSetField(out, TIFFTAG_FILLORDER, fillorder); else CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT); /* * Will copy `Orientation' tag from input image */ TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation); switch (orientation) { case ORIENTATION_BOTRIGHT: case ORIENTATION_RIGHTBOT: /* XXX */ TIFFWarning(TIFFFileName(in), "using bottom-left orientation"); orientation = ORIENTATION_BOTLEFT; /* fall thru... */ case ORIENTATION_LEFTBOT: /* XXX */ case ORIENTATION_BOTLEFT: break; case ORIENTATION_TOPRIGHT: case ORIENTATION_RIGHTTOP: /* XXX */ default: TIFFWarning(TIFFFileName(in), "using top-left orientation"); orientation = ORIENTATION_TOPLEFT; /* fall thru... */ case ORIENTATION_LEFTTOP: /* XXX */ case ORIENTATION_TOPLEFT: break; } TIFFSetField(out, TIFFTAG_ORIENTATION, orientation); /* * Choose tiles/strip for the output image according to * the command line arguments (-tiles, -strips) and the * structure of the input image. */ if (outtiled == -1) outtiled = TIFFIsTiled(in); if (outtiled) { /* * Setup output file's tile width&height. If either * is not specified, use either the value from the * input image or, if nothing is defined, use the * library default. */ if (tilewidth == (uint32) -1) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth); if (tilelength == (uint32) -1) TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength); TIFFDefaultTileSize(out, &tilewidth, &tilelength); TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth); TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength); } else { /* * RowsPerStrip is left unspecified: use either the * value from the input image or, if nothing is defined, * use the library default. 
*/ if (rowsperstrip == (uint32) 0) { if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &rowsperstrip)) { rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip); } if (rowsperstrip > length && rowsperstrip != (uint32)-1) rowsperstrip = length; } else if (rowsperstrip == (uint32) -1) rowsperstrip = length; TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip); } if (config != (uint16) -1) TIFFSetField(out, TIFFTAG_PLANARCONFIG, config); else CopyField(TIFFTAG_PLANARCONFIG, config); if (samplesperpixel <= 4) CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT); CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT); /* SMinSampleValue & SMaxSampleValue */ switch (compression) { case COMPRESSION_JPEG: TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality); TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode); break; case COMPRESSION_JBIG: CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII); break; case COMPRESSION_LZW: case COMPRESSION_ADOBE_DEFLATE: case COMPRESSION_DEFLATE: case COMPRESSION_LZMA: if (predictor != (uint16)-1) TIFFSetField(out, TIFFTAG_PREDICTOR, predictor); else CopyField(TIFFTAG_PREDICTOR, predictor); if (preset != -1) { if (compression == COMPRESSION_ADOBE_DEFLATE || compression == COMPRESSION_DEFLATE) TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset); else if (compression == COMPRESSION_LZMA) TIFFSetField(out, TIFFTAG_LZMAPRESET, preset); } break; case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: if (compression == COMPRESSION_CCITTFAX3) { if (g3opts != (uint32) -1) TIFFSetField(out, TIFFTAG_GROUP3OPTIONS, g3opts); else CopyField(TIFFTAG_GROUP3OPTIONS, g3opts); } else CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG); CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG); CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); 
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); break; } { uint32 len32; void** data; if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data)) TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data); } { uint16 ninks; const char* inknames; if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) { TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks); if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) { int inknameslen = strlen(inknames) + 1; const char* cp = inknames; while (ninks > 1) { cp = strchr(cp, '\0'); cp++; inknameslen += (strlen(cp) + 1); ninks--; } TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames); } } } { unsigned short pg0, pg1; if (pageInSeq == 1) { if (pageNum < 0) /* only one input file */ { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); } else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } else { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) { if (pageNum < 0) /* only one input file */ TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } } } for (p = tags; p < &tags[NTAGS]; p++) CopyTag(p->tag, p->count, p->type); cf = pickCopyFunc(in, out, bitspersample, samplesperpixel); return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE); } /* * Copy Functions. */ #define DECLAREcpFunc(x) \ static int x(TIFF* in, TIFF* out, \ uint32 imagelength, uint32 imagewidth, tsample_t spp) #define DECLAREreadFunc(x) \ static int x(TIFF* in, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); #define DECLAREwriteFunc(x) \ static int x(TIFF* out, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); /* * Contig -> contig by scanline for rows/strip change. 
 */
DECLAREcpFunc(cpContig2ContigByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;

	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	(void) imagewidth; (void) spp;	/* unused for contig row copies */
	for (row = 0; row < imagelength; row++) {
		/* `ignore` is a file-scope flag (presumably the -i command
		   line option — set outside this view) that lets the copy
		   continue past read errors. */
		if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) {
			TIFFError(TIFFFileName(in),
				  "Error, can't read scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
		if (TIFFWriteScanline(out, buf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
				  "Error, can't write scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/* Signature of a line-wise bias-subtraction routine. */
typedef void biasFn (void *image, void *bias, uint32 pixels);

/*
 * Generate saturating per-pixel subtraction for a given sample width:
 * each image pixel is clamped at 0 rather than wrapping below the bias.
 */
#define subtract(bits) \
static void subtract##bits (void *i, void *b, uint32 pixels)\
{\
   uint##bits *image = i;\
   uint##bits *bias = b;\
   while (pixels--) {\
     *image = *image > *bias ? *image-*bias : 0;\
     image++, bias++; \
   } \
}

subtract(8)
subtract(16)
subtract(32)

/*
 * Map a BitsPerSample value to the matching subtract routine;
 * returns NULL for unsupported sample widths.
 */
static biasFn *lineSubtractFn (unsigned bits)
{
	switch (bits) {
		case 8: return subtract8;
		case 16: return subtract16;
		case 32: return subtract32;
	}
	return NULL;
}

/*
 * Contig -> contig by scanline while subtracting a bias image.
*/ DECLAREcpFunc(cpBiasedContig2Contig) { if (spp == 1) { tsize_t biasSize = TIFFScanlineSize(bias); tsize_t bufSize = TIFFScanlineSize(in); tdata_t buf, biasBuf; uint32 biasWidth = 0, biasLength = 0; TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth); TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength); if (biasSize == bufSize && imagelength == biasLength && imagewidth == biasWidth) { uint16 sampleBits = 0; biasFn *subtractLine; TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits); subtractLine = lineSubtractFn (sampleBits); if (subtractLine) { uint32 row; buf = _TIFFmalloc(bufSize); biasBuf = _TIFFmalloc(bufSize); for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFReadScanline(bias, biasBuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read biased scanline %lu", (unsigned long) row); goto bad; } subtractLine (buf, biasBuf, imagewidth); if (TIFFWriteScanline(out, buf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } _TIFFfree(buf); _TIFFfree(biasBuf); TIFFSetDirectory(bias, TIFFCurrentDirectory(bias)); /* rewind */ return 1; bad: _TIFFfree(buf); _TIFFfree(biasBuf); return 0; } else { TIFFError(TIFFFileName(in), "No support for biasing %d bit pixels\n", sampleBits); return 0; } } TIFFError(TIFFFileName(in), "Bias image %s,%d\nis not the same size as %s,%d\n", TIFFFileName(bias), TIFFCurrentDirectory(bias), TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } else { TIFFError(TIFFFileName(in), "Can't bias %s,%d as it has >1 Sample/Pixel\n", TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } } /* * Strip -> strip for change in encoding. 
*/ DECLAREcpFunc(cpDecodedStrips) { tsize_t stripsize = TIFFStripSize(in); tdata_t buf = _TIFFmalloc(stripsize); (void) imagewidth; (void) spp; if (buf) { tstrip_t s, ns = TIFFNumberOfStrips(in); uint32 row = 0; _TIFFmemset(buf, 0, stripsize); for (s = 0; s < ns && row < imagelength; s++) { tsize_t cc = (row + rowsperstrip > imagelength) ? TIFFVStripSize(in, imagelength - row) : stripsize; if (TIFFReadEncodedStrip(in, s, buf, cc) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read strip %lu", (unsigned long) s); goto bad; } if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %lu", (unsigned long) s); goto bad; } row += rowsperstrip; } _TIFFfree(buf); return 1; } else { TIFFError(TIFFFileName(in), "Error, can't allocate memory buffer of size %lu " "to read strips", (unsigned long) stripsize); return 0; } bad: _TIFFfree(buf); return 0; } /* * Separate -> separate by row for rows/strip change. */ DECLAREcpFunc(cpSeparate2SeparateByRow) { tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t buf; uint32 row; tsample_t s; (void) imagewidth; buf = _TIFFmalloc(scanlinesize); if (!buf) return 0; _TIFFmemset(buf, 0, scanlinesize); for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFWriteScanline(out, buf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } _TIFFfree(buf); return 1; bad: _TIFFfree(buf); return 0; } /* * Contig -> separate by row. 
*/ DECLAREcpFunc(cpContig2SeparateByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); /* unpack channels */ for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, inbuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = ((uint8*)inbuf) + s; outp = (uint8*)outbuf; for (n = imagewidth; n-- > 0;) { *outp++ = *inp; inp += spp; } if (TIFFWriteScanline(out, outbuf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } /* * Separate -> contig by row. 
*/ DECLAREcpFunc(cpSeparate2ContigByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { if (TIFFReadScanline(in, inbuf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = (uint8*)inbuf; outp = ((uint8*)outbuf) + s; for (n = imagewidth; n-- > 0;) { *outp = *inp++; outp += spp; } } if (TIFFWriteScanline(out, outbuf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } static void cpStripToTile(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) *out++ = *in++; out += outskew; in += inskew; } } static void cpContigBufToSeparateBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample ) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } in += (spp-1) * bytes_per_sample; } out += outskew; in += inskew; } } static void cpSeparateBufToContigBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } out += (spp-1)*bytes_per_sample; } out += outskew; in += inskew; } } static int cpImage(TIFF* in, TIFF* 
out, readFunc fin, writeFunc fout, uint32 imagelength, uint32 imagewidth, tsample_t spp) { int status = 0; tdata_t buf = NULL; tsize_t scanlinesize = TIFFRasterScanlineSize(in); tsize_t bytes = scanlinesize * (tsize_t)imagelength; /* * XXX: Check for integer overflow. */ if (scanlinesize && imagelength && bytes / (tsize_t)imagelength == scanlinesize) { buf = _TIFFmalloc(bytes); if (buf) { if ((*fin)(in, (uint8*)buf, imagelength, imagewidth, spp)) { status = (*fout)(out, (uint8*)buf, imagelength, imagewidth, spp); } _TIFFfree(buf); } else { TIFFError(TIFFFileName(in), "Error, can't allocate space for image buffer"); } } else { TIFFError(TIFFFileName(in), "Error, no space for image buffer"); } return status; } DECLAREreadFunc(readContigStripsIntoBuffer) { tsize_t scanlinesize = TIFFScanlineSize(in); uint8* bufp = buf; uint32 row; (void) imagewidth; (void) spp; for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); return 0; } bufp += scanlinesize; } return 1; } DECLAREreadFunc(readSeparateStripsIntoBuffer) { int status = 1; tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t scanline; if (!scanlinesize) return 0; scanline = _TIFFmalloc(scanlinesize); if (!scanline) return 0; _TIFFmemset(scanline, 0, scanlinesize); (void) imagewidth; if (scanline) { uint8* bufp = (uint8*) buf; uint32 row; tsample_t s; for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { uint8* bp = bufp + s; tsize_t n = scanlinesize; uint8* sbuf = scanline; if (TIFFReadScanline(in, scanline, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); status = 0; goto done; } while (n-- > 0) *bp = *sbuf++, bp += spp; } bufp += scanlinesize * spp; } } done: _TIFFfree(scanline); return status; } DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = 
TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb + tilew > imagew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREreadFunc(readSeparateTilesIntoBuffer) { int status = 1; uint32 imagew = TIFFRasterScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew*spp; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; uint16 bps, bytes_per_sample; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? 
imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu, " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); status = 0; goto done; } /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew*spp > imagew) { uint32 width = imagew - colb; int oskew = tilew*spp - width; cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, width/(spp*bytes_per_sample), oskew + iskew, oskew/spp, spp, bytes_per_sample); } else cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, tw, iskew, 0, spp, bytes_per_sample); } colb += tilew*spp; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREwriteFunc(writeBufferToContigStrips) { uint32 row, rowsperstrip; tstrip_t strip = 0; (void) imagewidth; (void) spp; (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); return 0; } buf += stripsize; } return 1; } DECLAREwriteFunc(writeBufferToSeparateStrips) { uint32 rowsize = imagewidth * spp; uint32 rowsperstrip; tsize_t stripsize = TIFFStripSize(out); tdata_t obuf; tstrip_t strip = 0; tsample_t s; obuf = _TIFFmalloc(stripsize); if (obuf == NULL) return (0); _TIFFmemset(obuf, 0, stripsize); (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (s = 0; s < spp; s++) { uint32 row; for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? 
imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); cpContigBufToSeparateBuf( obuf, (uint8*) buf + row*rowsize + s, nrows, imagewidth, 0, 0, spp, 1); if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); _TIFFfree(obuf); return 0; } } } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToSeparateTiles) { uint32 imagew = TIFFScanlineSize(out); tsize_t tilew = TIFFTileRowSize(out); uint32 iimagew = TIFFRasterScanlineSize(out); int iskew = iimagew - tilew*spp; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; uint16 bps, bytes_per_sample; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps); assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = (imagew - colb); int oskew = tilew - width; cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, width/bytes_per_sample, oskew, (oskew*spp)+iskew, spp, bytes_per_sample); } else cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, tilewidth, 0, iskew, spp, bytes_per_sample); if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); _TIFFfree(obuf); return 0; } } colb += tilew; } bufp += nrow * iimagew; } _TIFFfree(obuf); return 1; } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigStrips2ContigTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig strips -> separate tiles. */ DECLAREcpFunc(cpContigStrips2SeparateTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate strips -> contig tiles. */ DECLAREcpFunc(cpSeparateStrips2ContigTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate strips -> separate tiles. */ DECLAREcpFunc(cpSeparateStrips2SeparateTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigTiles2ContigTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> separate tiles. */ DECLAREcpFunc(cpContigTiles2SeparateTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> contig tiles. 
 */
DECLAREcpFunc(cpSeparateTiles2ContigTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate tiles (tile dimension change).
 */
DECLAREcpFunc(cpSeparateTiles2SeparateTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> contig strips.
 * (Header comment corrected: this wrapper converts tiles to strips,
 * not tiles to tiles.)
 */
DECLAREcpFunc(cpContigTiles2ContigStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> separate strips.
 */
DECLAREcpFunc(cpContigTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> contig strips.
 */
DECLAREcpFunc(cpSeparateTiles2ContigStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate strips.
 */
DECLAREcpFunc(cpSeparateTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Select the appropriate copy function to use.
*/ static copyFunc pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel) { uint16 shortv; uint32 w, l, tw, tl; int bychunk; (void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv); if (shortv != config && bitspersample != 8 && samplesperpixel > 1) { fprintf(stderr, "%s: Cannot handle different planar configuration w/ bits/sample != 8\n", TIFFFileName(in)); return (NULL); } TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l); if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) { uint32 irps = (uint32) -1L; TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps); /* if biased, force decoded copying to allow image subtraction */ bychunk = !bias && (rowsperstrip == irps); }else{ /* either in or out is tiled */ if (bias) { fprintf(stderr, "%s: Cannot handle tiled configuration w/bias image\n", TIFFFileName(in)); return (NULL); } if (TIFFIsTiled(out)) { if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw)) tw = w; if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl)) tl = l; bychunk = (tw == tilewidth && tl == tilelength); } else { /* out's not, so in must be tiled */ TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); bychunk = (tw == w && tl == rowsperstrip); } } #define T 1 #define F 0 #define pack(a,b,c,d,e) ((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e))) switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) { /* Strips -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T): return cpContigStrips2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T): return cpContigStrips2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T): return cpSeparateStrips2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F): case 
pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T): return cpSeparateStrips2SeparateTiles; /* Tiles -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T): return cpContigTiles2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T): return cpContigTiles2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T): return cpSeparateTiles2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T): return cpSeparateTiles2SeparateTiles; /* Tiles -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T): return cpContigTiles2ContigStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T): return cpContigTiles2SeparateStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T): return cpSeparateTiles2ContigStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T): return cpSeparateTiles2SeparateStrips; /* Strips -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F): return bias ? 
cpBiasedContig2Contig : cpContig2ContigByRow; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T): return cpDecodedStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T): return cpContig2SeparateByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T): return cpSeparate2ContigByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T): return cpSeparate2SeparateByRow; } #undef pack #undef F #undef T fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n", TIFFFileName(in)); return (NULL); } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
./CrossVul/dataset_final_sorted/CWE-191/c/good_4855_1
crossvul-cpp_data_bad_3345_0
/* * Edgeport USB Serial Converter driver * * Copyright (C) 2000-2002 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Supports the following devices: * EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/serial.h> #include <linux/swab.h> #include <linux/kfifo.h> #include <linux/ioctl.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_16654.h" #include "io_usbvend.h" #include "io_ti.h" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define EPROM_PAGE_SIZE 64 /* different hardware types */ #define HARDWARE_TYPE_930 0 #define HARDWARE_TYPE_TIUMP 1 /* IOCTL_PRIVATE_TI_GET_MODE Definitions */ #define TI_MODE_CONFIGURING 0 /* Device has not entered start device */ #define TI_MODE_BOOT 1 /* Staying in boot mode */ #define TI_MODE_DOWNLOAD 2 /* Made it to download mode */ #define TI_MODE_TRANSITIONING 3 /* * Currently in boot mode but * transitioning to download mode */ /* read urb state */ #define EDGE_READ_URB_RUNNING 0 #define EDGE_READ_URB_STOPPING 1 #define EDGE_READ_URB_STOPPED 2 #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */ /* Product information 
 read from the Edgeport */
struct product_info {
	int	TiMode;			/* Current TI Mode  */
	__u8	hardware_type;		/* Type of hardware */
} __attribute__((packed));

/*
 * Edgeport firmware header
 *
 * "build_number" has been set to 0 in all three of the images I have
 * seen, and Digi Tech Support suggests that it is safe to ignore it.
 *
 * "length" is the number of bytes of actual data following the header.
 *
 * "checksum" is the low order byte resulting from adding the values of
 * all the data bytes.
 */
struct edgeport_fw_hdr {
	u8 major_version;
	u8 minor_version;
	__le16 build_number;
	__le16 length;
	u8 checksum;
} __packed;

/* Per-port driver state. */
struct edgeport_port {
	__u16 uart_base;
	__u16 dma_address;
	__u8 shadow_msr;
	__u8 shadow_mcr;
	__u8 shadow_lsr;
	__u8 lsr_mask;
	__u32 ump_read_timeout;		/*
					 * Number of milliseconds the UMP will
					 * wait without data before completing
					 * a read short
					 */
	int baud_rate;
	int close_pending;
	int lsr_event;

	struct edgeport_serial	*edge_serial;
	struct usb_serial_port	*port;
	__u8 bUartMode;		/* Port type, 0: RS232, etc. */
	spinlock_t ep_lock;
	int ep_read_urb_state;
	int ep_write_urb_in_use;
};

/* Per-device (USB interface) driver state. */
struct edgeport_serial {
	struct product_info product_info;
	u8 TI_I2C_Type;			/* Type of I2C in UMP */
	u8 TiReadI2C;			/*
					 * Set to TRUE if we have read the
					 * I2c in Boot Mode
					 */
	struct mutex es_lock;
	int num_ports_open;
	struct usb_serial *serial;
	struct delayed_work heartbeat_work;
	int fw_version;
	bool use_heartbeat;
};


/* Devices that this driver supports */
static const struct usb_device_id edgeport_1port_id_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
	{ }
};

static const struct usb_device_id edgeport_2port_id_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
	/* The 4, 8 and 16 port devices show up as multiple 2 port devices */
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
	{ }
};

/* Devices that this driver supports */
static const struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table_combined);

/* Module parameters / defaults (set up outside this view). */
static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode;		/* RS232 */

/* Forward declarations. */
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
		int length);

static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);

static void edge_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);

static int do_download_mode(struct edgeport_serial *serial,
		const struct firmware *fw);
static int do_boot_mode(struct edgeport_serial *serial,
		const struct firmware *fw);

/* sysfs attributes */
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);

/*
 * Some release of Edgeport firmware "down3.bin" after version
4.80 * introduced code to automatically disconnect idle devices on some * Edgeport models after periods of inactivity, typically ~60 seconds. * This occurs without regard to whether ports on the device are open * or not. Digi International Tech Support suggested: * * 1. Adding driver "heartbeat" code to reset the firmware timer by * requesting a descriptor record every 15 seconds, which should be * effective with newer firmware versions that require it, and benign * with older versions that do not. In practice 40 seconds seems often * enough. * 2. The heartbeat code is currently required only on Edgeport/416 models. */ #define FW_HEARTBEAT_VERSION_CUTOFF ((4 << 8) + 80) #define FW_HEARTBEAT_SECS 40 /* Timeouts in msecs: firmware downloads take longer */ #define TI_VSEND_TIMEOUT_DEFAULT 1000 #define TI_VSEND_TIMEOUT_FW_DOWNLOAD 10000 static int ti_vread_sync(struct usb_device *dev, __u8 request, __u16 value, __u16 index, u8 *data, int size) { int status; status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN), value, index, data, size, 1000); if (status < 0) return status; if (status != size) { dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n", __func__, size, status); return -ECOMM; } return 0; } static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value, u16 index, u8 *data, int size, int timeout) { int status; status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT), value, index, data, size, timeout); if (status < 0) return status; if (status != size) { dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n", __func__, size, status); return -ECOMM; } return 0; } static int send_cmd(struct usb_device *dev, __u8 command, __u8 moduleid, __u16 value, u8 *data, int size) { return ti_vsend_sync(dev, command, value, moduleid, data, size, TI_VSEND_TIMEOUT_DEFAULT); } /* clear tx/rx buffers and fifo in TI UMP */ static 
int purge_port(struct usb_serial_port *port, __u16 mask)
{
	int port_number = port->port_number;

	dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);

	/* UMP ports are addressed starting at UMPM_UART1_PORT. */
	return send_cmd(port->serial->dev,
			UMPC_PURGE_PORT,
			(__u8)(UMPM_UART1_PORT + port_number),
			mask,
			NULL,
			0);
}

/**
 * read_download_mem - Read edgeport memory from TI chip
 * @dev: usb device pointer
 * @start_address: Device CPU address at which to read
 * @length: Length of above data
 * @address_type: Can read both XDATA and I2C
 * @buffer: pointer to input data buffer
 */
static int read_download_mem(struct usb_device *dev, int start_address,
				int length, __u8 address_type, __u8 *buffer)
{
	int status = 0;
	__u8 read_length;
	u16 be_start_address;

	dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);

	/*
	 * Read in blocks of 64 bytes
	 * (TI firmware can't handle more than 64 byte reads)
	 */
	while (length) {
		if (length > 64)
			read_length = 64;
		else
			read_length = (__u8)length;

		if (read_length > 1) {
			dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__,
				start_address, read_length);
		}
		/*
		 * NOTE: Must use swab as wIndex is sent in little-endian
		 * byte order regardless of host byte order.
		 */
		be_start_address = swab16((u16)start_address);
		status = ti_vread_sync(dev, UMPC_MEMORY_READ,
					(__u16)address_type,
					be_start_address,
					buffer, read_length);

		if (status) {
			dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
			return status;
		}

		if (read_length > 1)
			usb_serial_debug_data(&dev->dev, __func__, read_length, buffer);

		/* Update pointers/length */
		start_address += read_length;
		buffer += read_length;
		length -= read_length;
	}

	return status;
}

/* Read from the on-chip XDATA address space (download-mode RAM). */
static int read_ram(struct usb_device *dev, int start_address,
						int length, __u8 *buffer)
{
	return read_download_mem(dev, start_address, length,
				DTK_ADDR_SPACE_XDATA, buffer);
}

/* Read edgeport memory to a given block */
static int read_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;

	/* Boot-mode reads go one byte at a time. */
	for (i = 0; i < length; i++) {
		status = ti_vread_sync(serial->serial->dev,
				UMPC_MEMORY_READ, serial->TI_I2C_Type,
				(__u16)(start_address+i), &buffer[i], 0x01);
		if (status) {
			dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n",
				__func__, status);
			return status;
		}
	}

	dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	/* Remember that the required pre-write read has happened. */
	serial->TiReadI2C = 1;

	return status;
}

/* Write given block to TI EPROM memory */
static int write_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;
	u8 *temp;

	/* Must do a read before write */
	if (!serial->TiReadI2C) {
		/* One-byte heap buffer: not all platforms can DMA from stack. */
		temp = kmalloc(1, GFP_KERNEL);
		if (!temp)
			return -ENOMEM;

		status = read_boot_mem(serial, 0, 1, temp);
		kfree(temp);
		if (status)
			return status;
	}

	for (i = 0; i < length; ++i) {
		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				buffer[i], (u16)(i + start_address), NULL,
				0, TI_VSEND_TIMEOUT_DEFAULT);
		if (status)
			return status;
	}

	/* Fix typo in debug format string: "start_sddr" -> "start_addr". */
	dev_dbg(&serial->serial->dev->dev, "%s - start_addr = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	return status;
}

/* Write edgeport I2C memory to TI chip */
static int write_i2c_mem(struct edgeport_serial *serial,
		int start_address, int length, __u8 address_type, __u8 *buffer)
{
	struct device *dev = &serial->serial->dev->dev;
	int status = 0;
	int write_length;
	u16 be_start_address;

	/* We can only send a maximum of 1 aligned byte page at a time */

	/* calculate the number of bytes left in the first page */
	write_length = EPROM_PAGE_SIZE -
				(start_address & (EPROM_PAGE_SIZE - 1));

	if (write_length > length)
		write_length = length;

	dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n",
		__func__, start_address, write_length);
	usb_serial_debug_data(dev, __func__, write_length, buffer);

	/*
	 * Write first page.
	 *
	 * NOTE: Must use swab as wIndex is sent in little-endian byte order
	 * regardless of host byte order.
	 */
	be_start_address = swab16((u16)start_address);
	status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				(u16)address_type, be_start_address,
				buffer,	write_length, TI_VSEND_TIMEOUT_DEFAULT);
	if (status) {
		dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
		return status;
	}

	length -= write_length;
	start_address += write_length;
	buffer += write_length;

	/*
	 * We should be aligned now -- can write max page size bytes at a
	 * time.
	 */
	while (length) {
		if (length > EPROM_PAGE_SIZE)
			write_length = EPROM_PAGE_SIZE;
		else
			write_length = length;

		dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n",
			__func__, start_address, write_length);
		usb_serial_debug_data(dev, __func__, write_length, buffer);

		/*
		 * Write next page.
		 *
		 * NOTE: Must use swab as wIndex is sent in little-endian byte
		 * order regardless of host byte order.
		 */
		be_start_address = swab16((u16)start_address);
		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				(u16)address_type, be_start_address, buffer,
				write_length, TI_VSEND_TIMEOUT_DEFAULT);
		if (status) {
			dev_err(dev, "%s - ERROR %d\n", __func__, status);
			return status;
		}

		length -= write_length;
		start_address += write_length;
		buffer += write_length;
	}
	return status;
}

/*
 * Examine the UMP DMA registers and LSR
 *
 * Check the MSBit of the X and Y DMA byte count registers.
 * A zero in this bit indicates that the TX DMA buffers are empty
 * then check the TX Empty bit in the UART.
 */
static int tx_active(struct edgeport_port *port)
{
	int status;
	struct out_endpoint_desc_block *oedb;
	__u8 *lsr;
	int bytes_left = 0;

	oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
	if (!oedb)
		return -ENOMEM;

	/*
	 * Sigh, that's right, just one byte, as not all platforms can
	 * do DMA from stack
	 */
	lsr = kmalloc(1, GFP_KERNEL);
	if (!lsr) {
		kfree(oedb);
		return -ENOMEM;
	}
	/* Read the DMA Count Registers */
	status = read_ram(port->port->serial->dev, port->dma_address,
						sizeof(*oedb), (void *)oedb);
	if (status)
		goto exit_is_tx_active;

	dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);

	/* and the LSR */
	status = read_ram(port->port->serial->dev,
			port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
	if (status)
		goto exit_is_tx_active;
	dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);

	/* If either buffer has data or we are transmitting then return TRUE */
	if ((oedb->XByteCount & 0x80) != 0)
		bytes_left += 64;

	if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
		bytes_left += 1;

	/* We return Not Active if we get any kind of error */
exit_is_tx_active:
	dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);

	kfree(lsr);
	kfree(oedb);
	return bytes_left;
}

static int choose_config(struct usb_device *dev)
{
	/*
	 * There may be multiple configurations on this device, in which case
	 * we would need to read and parse all of them to find out which one
	 * we want.
	 * However, we just support one config at this point,
	 * configuration # 1, which is Config Descriptor 0.
	 */
	dev_dbg(&dev->dev, "%s - Number of Interfaces = %d\n",
		__func__, dev->config->desc.bNumInterfaces);
	dev_dbg(&dev->dev, "%s - MAX Power = %d\n",
		__func__, dev->config->desc.bMaxPower * 2);

	if (dev->config->desc.bNumInterfaces != 1) {
		dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n", __func__);
		return -ENODEV;
	}

	return 0;
}

/* Dispatch a memory read to download-mode or boot-mode access. */
static int read_rom(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status;

	if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
		status = read_download_mem(serial->serial->dev,
					       start_address,
					       length,
					       serial->TI_I2C_Type,
					       buffer);
	} else {
		status = read_boot_mem(serial, start_address, length, buffer);
	}
	return status;
}

/* Dispatch a memory write to boot-mode or download-mode access. */
static int write_rom(struct edgeport_serial *serial, int start_address,
						int length, __u8 *buffer)
{
	if (serial->product_info.TiMode == TI_MODE_BOOT)
		return write_boot_mem(serial, start_address, length,
								buffer);

	if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
		return write_i2c_mem(serial, start_address, length,
						serial->TI_I2C_Type, buffer);
	return -EINVAL;
}

/* Read a descriptor header from I2C based on type */
/*
 * NOTE(review): returns 0 both when the descriptor is not found and when a
 * read fails; callers only distinguish "found" (non-zero address) from
 * "not found / error".
 */
static int get_descriptor_addr(struct edgeport_serial *serial,
				int desc_type, struct ti_i2c_desc *rom_desc)
{
	int start_address;
	int status;

	/* Search for requested descriptor in I2C */
	start_address = 2;
	do {
		status = read_rom(serial,
				   start_address,
				   sizeof(struct ti_i2c_desc),
				   (__u8 *)rom_desc);
		if (status)
			return 0;

		if (rom_desc->Type == desc_type)
			return start_address;

		start_address = start_address + sizeof(struct ti_i2c_desc) +
						le16_to_cpu(rom_desc->Size);

	} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);

	return 0;
}

/* Validate descriptor checksum */
static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
{
	__u16 i;
	__u8 cs = 0;

	/* Simple 8-bit additive checksum over the descriptor payload. */
	for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
		cs = (__u8)(cs + buffer[i]);

	if (cs != rom_desc->CheckSum) {
		pr_debug("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs);
		return -EINVAL;
	}
	return 0;
}

/* Make sure that the I2C image is good */
static int check_i2c_image(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->dev->dev;
	int status = 0;
	struct ti_i2c_desc *rom_desc;
	int start_address = 2;
	__u8 *buffer;
	__u16 ttype;

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc)
		return -ENOMEM;

	buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
	if (!buffer) {
		kfree(rom_desc);
		return -ENOMEM;
	}

	/* Read the first byte (Signature0) must be 0x52 or 0x10 */
	status = read_rom(serial, 0, 1, buffer);
	if (status)
		goto out;

	if (*buffer != UMP5152 && *buffer != UMP3410) {
		dev_err(dev, "%s - invalid buffer signature\n", __func__);
		status = -ENODEV;
		goto out;
	}

	do {
		/* Validate the I2C */
		status = read_rom(serial,
				start_address,
				sizeof(struct ti_i2c_desc),
				(__u8 *)rom_desc);
		if (status)
			break;

		/* Bail out before a descriptor would run past the I2C image. */
		if ((start_address + sizeof(struct ti_i2c_desc) +
			le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
			status = -ENODEV;
			dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
			break;
		}

		dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type);

		/* Skip type 2 record */
		ttype = rom_desc->Type & 0x0f;
		if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
			&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
			/* Read the descriptor data */
			status = read_rom(serial, start_address +
						sizeof(struct ti_i2c_desc),
						le16_to_cpu(rom_desc->Size),
						buffer);
			if (status)
				break;

			status = valid_csum(rom_desc, buffer);
			if (status)
				break;
		}
		start_address = start_address + sizeof(struct ti_i2c_desc) +
						le16_to_cpu(rom_desc->Size);

	} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
				(start_address < TI_MAX_I2C_SIZE));

	/* Image is only valid if it ends with an ION descriptor in bounds. */
	if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
				(start_address > TI_MAX_I2C_SIZE))
		status = -ENODEV;

out:
	kfree(buffer);
	kfree(rom_desc);
	return status;
}

static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
{
	int status;
	int start_address;
	struct ti_i2c_desc *rom_desc;
	struct edge_ti_manuf_descriptor *desc;
	struct device *dev = &serial->serial->dev->dev;

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc)
		return -ENOMEM;

	start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
								rom_desc);

	if (!start_address) {
		dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n", __func__);
		status = -ENODEV;
		goto exit;
	}

	/* Read the descriptor data */
	status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
					le16_to_cpu(rom_desc->Size), buffer);
	if (status)
		goto exit;

	/*
	 * NOTE(review): a checksum failure here is returned to the caller,
	 * but the debug dump below still runs on the (possibly bad) data.
	 */
	status = valid_csum(rom_desc, buffer);

	desc = (struct edge_ti_manuf_descriptor *)buffer;
	dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig);
	dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version);
	dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev);
	dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts);
	dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts);
	dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts);

exit:
	kfree(rom_desc);
	return status;
}

/* Build firmware header used for firmware update */
static int build_i2c_fw_hdr(u8 *header, const struct firmware *fw)
{
	__u8 *buffer;
	int buffer_size;
	int i;
	__u8 cs = 0;
	struct ti_i2c_desc *i2c_header;
	struct ti_i2c_image_header *img_header;
	struct ti_i2c_firmware_rec *firmware_rec;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	/*
	 * In order to update the I2C firmware we must change the type 2 record
	 * to type 0xF2. This will force the UMP to come up in Boot Mode.
	 * Then while in boot mode, the driver will download the latest
	 * firmware (padded to 15.5k) into the UMP ram. And finally when the
	 * device comes back up in download mode the driver will cause the new
	 * firmware to be copied from the UMP Ram to I2C and the firmware will
	 * update the record type from 0xf2 to 0x02.
	 */

	/*
	 * Allocate a 15.5k buffer + 2 bytes for version number (Firmware
	 * Record)
	 */
	buffer_size = (((1024 * 16) - 512 ) +
			sizeof(struct ti_i2c_firmware_rec));

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Set entire image of 0xffs */
	memset(buffer, 0xff, buffer_size);

	/* Copy version number into firmware record */
	firmware_rec = (struct ti_i2c_firmware_rec *)buffer;

	firmware_rec->Ver_Major = fw_hdr->major_version;
	firmware_rec->Ver_Minor = fw_hdr->minor_version;

	/* Pointer to fw_down memory image */
	img_header = (struct ti_i2c_image_header *)&fw->data[4];

	/*
	 * NOTE(review): img_header->Length comes from the firmware blob and
	 * is not checked against buffer_size here, so an oversized Length
	 * could overflow `buffer` -- verify check_fw_sanity() (or a caller)
	 * bounds this field.
	 */
	memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
		&fw->data[4 + sizeof(struct ti_i2c_image_header)],
		le16_to_cpu(img_header->Length));

	/* Checksum the padded image, including the version bytes. */
	for (i=0; i < buffer_size; i++) {
		cs = (__u8)(cs + buffer[i]);
	}

	kfree(buffer);

	/* Build new header */
	i2c_header =  (struct ti_i2c_desc *)header;
	firmware_rec =  (struct ti_i2c_firmware_rec*)i2c_header->Data;

	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
	i2c_header->Size	= cpu_to_le16(buffer_size);
	i2c_header->CheckSum	= cs;
	firmware_rec->Ver_Major	= fw_hdr->major_version;
	firmware_rec->Ver_Minor	= fw_hdr->minor_version;

	return 0;
}

/* Try to figure out what type of I2c we have */
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->dev->dev;
	int status;
	u8 *data;

	/* One-byte heap buffer for the control transfer. */
	data = kmalloc(1, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Try to read type 2 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
	if (status)
		dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
	else
		dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
	if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
		dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
		goto out;
	}

	/* Try to read type 3 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_III, 0,	data, 0x01);
	if (status)
dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status); else dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data); if ((!status) && (*data == UMP5152 || *data == UMP3410)) { dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III; goto out; } dev_dbg(dev, "%s - Unknown\n", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; status = -ENODEV; out: kfree(data); return status; } static int bulk_xfer(struct usb_serial *serial, void *buffer, int length, int *num_sent) { int status; status = usb_bulk_msg(serial->dev, usb_sndbulkpipe(serial->dev, serial->port[0]->bulk_out_endpointAddress), buffer, length, num_sent, 1000); return status; } /* Download given firmware image to the device (IN BOOT MODE) */ static int download_code(struct edgeport_serial *serial, __u8 *image, int image_length) { int status = 0; int pos; int transfer; int done; /* Transfer firmware image */ for (pos = 0; pos < image_length; ) { /* Read the next buffer from file */ transfer = image_length - pos; if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE) transfer = EDGE_FW_BULK_MAX_PACKET_SIZE; /* Transfer data */ status = bulk_xfer(serial->serial, &image[pos], transfer, &done); if (status) break; /* Advance buffer pointer */ pos += done; } return status; } /* FIXME!!! 
*/ static int config_boot_dev(struct usb_device *dev) { return 0; } static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc) { return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev); } static int check_fw_sanity(struct edgeport_serial *serial, const struct firmware *fw) { u16 length_total; u8 checksum = 0; int pos; struct device *dev = &serial->serial->interface->dev; struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data; if (fw->size < sizeof(struct edgeport_fw_hdr)) { dev_err(dev, "incomplete fw header\n"); return -EINVAL; } length_total = le16_to_cpu(fw_hdr->length) + sizeof(struct edgeport_fw_hdr); if (fw->size != length_total) { dev_err(dev, "bad fw size (expected: %u, got: %zu)\n", length_total, fw->size); return -EINVAL; } for (pos = sizeof(struct edgeport_fw_hdr); pos < fw->size; ++pos) checksum += fw->data[pos]; if (checksum != fw_hdr->checksum) { dev_err(dev, "bad fw checksum (expected: 0x%x, got: 0x%x)\n", fw_hdr->checksum, checksum); return -EINVAL; } return 0; } /* * DownloadTIFirmware - Download run-time operating firmware to the TI5052 * * This routine downloads the main operating code into the TI5052, using the * boot code already burned into E2PROM or ROM. */ static int download_fw(struct edgeport_serial *serial) { struct device *dev = &serial->serial->interface->dev; int status = 0; struct usb_interface_descriptor *interface; const struct firmware *fw; const char *fw_name = "edgeport/down3.bin"; struct edgeport_fw_hdr *fw_hdr; status = request_firmware(&fw, fw_name, dev); if (status) { dev_err(dev, "Failed to load image \"%s\" err %d\n", fw_name, status); return status; } if (check_fw_sanity(serial, fw)) { status = -EINVAL; goto out; } fw_hdr = (struct edgeport_fw_hdr *)fw->data; /* If on-board version is newer, "fw_version" will be updated later. 
 */
	serial->fw_version = (fw_hdr->major_version << 8) +
			fw_hdr->minor_version;

	/*
	 * This routine is entered by both the BOOT mode and the Download mode
	 * We can determine which code is running by the reading the config
	 * descriptor and if we have only one bulk pipe it is in boot mode
	 */
	serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP;

	/* Default to type 2 i2c */
	serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;

	status = choose_config(serial->serial->dev);
	if (status)
		goto out;

	interface = &serial->serial->interface->cur_altsetting->desc;
	/*
	 * NOTE(review): taking the address of a struct member can never
	 * yield NULL, so this check is always false (dead code).
	 */
	if (!interface) {
		dev_err(dev, "%s - no interface set, error!\n", __func__);
		status = -ENODEV;
		goto out;
	}

	/*
	 * Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING
	 * if we have more than one endpoint we are definitely in download
	 * mode
	 */
	if (interface->bNumEndpoints > 1) {
		serial->product_info.TiMode = TI_MODE_DOWNLOAD;
		status = do_download_mode(serial, fw);
	} else {
		/* Otherwise we will remain in configuring mode */
		serial->product_info.TiMode = TI_MODE_CONFIGURING;
		status = do_boot_mode(serial, fw);
	}

out:
	release_firmware(fw);
	return status;
}

/*
 * Device is already running its operational firmware; compare the on-board
 * firmware version against the blob we loaded and, if ours is newer, mark
 * the I2C firmware record blank (0xf2) and reset the device so it comes
 * back in boot mode for reflashing.
 */
static int do_download_mode(struct edgeport_serial *serial,
		const struct firmware *fw)
{
	struct device *dev = &serial->serial->interface->dev;
	int status = 0;
	int start_address;
	struct edge_ti_manuf_descriptor *ti_manuf_desc;
	int download_cur_ver;
	int download_new_ver;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
	struct ti_i2c_desc *rom_desc;

	dev_dbg(dev, "%s - RUNNING IN DOWNLOAD MODE\n", __func__);

	status = check_i2c_image(serial);
	if (status) {
		dev_dbg(dev, "%s - DOWNLOAD MODE -- BAD I2C\n", __func__);
		return status;
	}

	/*
	 * Validate Hardware version number
	 * Read Manufacturing Descriptor from TI Based Edgeport
	 */
	ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
	if (!ti_manuf_desc)
		return -ENOMEM;

	status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
	if (status) {
		kfree(ti_manuf_desc);
		return status;
	}

	/* Check version number of ION descriptor */
	if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
		dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
			__func__, ti_cpu_rev(ti_manuf_desc));
		kfree(ti_manuf_desc);
		return -EINVAL;
	}

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc) {
		kfree(ti_manuf_desc);
		return -ENOMEM;
	}

	/* Search for type 2 record (firmware record) */
	start_address = get_descriptor_addr(serial,
			I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
	if (start_address != 0) {
		struct ti_i2c_firmware_rec *firmware_version;
		u8 *record;

		dev_dbg(dev, "%s - Found Type FIRMWARE (Type 2) record\n",
			__func__);

		firmware_version = kmalloc(sizeof(*firmware_version),
							GFP_KERNEL);
		if (!firmware_version) {
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			return -ENOMEM;
		}

		/*
		 * Validate version number
		 * Read the descriptor data
		 */
		status = read_rom(serial, start_address +
				sizeof(struct ti_i2c_desc),
				sizeof(struct ti_i2c_firmware_rec),
				(__u8 *)firmware_version);
		if (status) {
			kfree(firmware_version);
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			return status;
		}

		/*
		 * Check version number of download with current
		 * version in I2c
		 */
		download_cur_ver = (firmware_version->Ver_Major << 8) +
				   (firmware_version->Ver_Minor);
		download_new_ver = (fw_hdr->major_version << 8) +
				   (fw_hdr->minor_version);

		dev_dbg(dev, "%s - >> FW Versions Device %d.%d  Driver %d.%d\n",
			__func__, firmware_version->Ver_Major,
			firmware_version->Ver_Minor,
			fw_hdr->major_version, fw_hdr->minor_version);

		/*
		 * Check if we have an old version in the I2C and
		 * update if necessary
		 */
		if (download_cur_ver < download_new_ver) {
			dev_dbg(dev, "%s - Update I2C dld from %d.%d to %d.%d\n",
				__func__,
				firmware_version->Ver_Major,
				firmware_version->Ver_Minor,
				fw_hdr->major_version,
				fw_hdr->minor_version);

			record = kmalloc(1, GFP_KERNEL);
			if (!record) {
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}
			/*
			 * In order to update the I2C firmware we must
			 * change the type 2 record to type 0xF2. This
			 * will force the UMP to come up in Boot Mode.
			 * Then while in boot mode, the driver will
			 * download the latest firmware (padded to
			 * 15.5k) into the UMP ram. Finally when the
			 * device comes back up in download mode the
			 * driver will cause the new firmware to be
			 * copied from the UMP Ram to I2C and the
			 * firmware will update the record type from
			 * 0xf2 to 0x02.
			 */
			*record = I2C_DESC_TYPE_FIRMWARE_BLANK;

			/*
			 * Change the I2C Firmware record type to
			 * 0xf2 to trigger an update
			 */
			status = write_rom(serial, start_address,
					sizeof(*record), record);
			if (status) {
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}

			/*
			 * verify the write -- must do this in order
			 * for write to complete before we do the
			 * hardware reset
			 */
			status = read_rom(serial, start_address,
						sizeof(*record), record);
			if (status) {
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}

			if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
				dev_err(dev, "%s - error resetting device\n",
					__func__);
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENODEV;
			}

			dev_dbg(dev, "%s - HARDWARE RESET\n", __func__);

			/* Reset UMP -- Back to BOOT MODE */
			status = ti_vsend_sync(serial->serial->dev,
					UMPC_HARDWARE_RESET,
					0, 0, NULL, 0,
					TI_VSEND_TIMEOUT_DEFAULT);

			dev_dbg(dev, "%s - HARDWARE RESET return %d\n",
				__func__, status);

			/* return an error on purpose.
			 */
			kfree(record);
			kfree(firmware_version);
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			return -ENODEV;
		}
		/* Same or newer fw version is already loaded */
		serial->fw_version = download_cur_ver;
		kfree(firmware_version);
	}
	/* Search for type 0xF2 record (firmware blank record) */
	else {
		start_address = get_descriptor_addr(serial,
				I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc);
		if (start_address != 0) {
#define HEADER_SIZE	(sizeof(struct ti_i2c_desc) + \
					sizeof(struct ti_i2c_firmware_rec))
			__u8 *header;
			__u8 *vheader;

			header = kmalloc(HEADER_SIZE, GFP_KERNEL);
			if (!header) {
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}

			vheader = kmalloc(HEADER_SIZE, GFP_KERNEL);
			if (!vheader) {
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}

			dev_dbg(dev, "%s - Found Type BLANK FIRMWARE (Type F2) record\n",
				__func__);

			/*
			 * In order to update the I2C firmware we must change
			 * the type 2 record to type 0xF2. This will force the
			 * UMP to come up in Boot Mode. Then while in boot
			 * mode, the driver will download the latest firmware
			 * (padded to 15.5k) into the UMP ram. Finally when the
			 * device comes back up in download mode the driver
			 * will cause the new firmware to be copied from the
			 * UMP Ram to I2C and the firmware will update the
			 * record type from 0xf2 to 0x02.
			 */
			status = build_i2c_fw_hdr(header, fw);
			if (status) {
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			/*
			 * Update I2C with type 0xf2 record with correct
			 * size and checksum
			 */
			status = write_rom(serial,
						start_address,
						HEADER_SIZE,
						header);
			if (status) {
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			/*
			 * verify the write -- must do this in order for
			 * write to complete before we do the hardware reset
			 */
			status = read_rom(serial, start_address,
							HEADER_SIZE, vheader);

			if (status) {
				dev_dbg(dev, "%s - can't read header back\n",
					__func__);
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}
			if (memcmp(vheader, header, HEADER_SIZE)) {
				dev_dbg(dev, "%s - write download record failed\n",
					__func__);
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			kfree(vheader);
			kfree(header);

			dev_dbg(dev, "%s - Start firmware update\n", __func__);

			/* Tell firmware to copy download image into I2C */
			status = ti_vsend_sync(serial->serial->dev,
					UMPC_COPY_DNLD_TO_I2C,
					0, 0, NULL, 0,
					TI_VSEND_TIMEOUT_FW_DOWNLOAD);

			dev_dbg(dev, "%s - Update complete 0x%x\n", __func__,
				status);
			if (status) {
				dev_err(dev,
					"%s - UMPC_COPY_DNLD_TO_I2C failed\n",
					__func__);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}
		}
	}

	/* The device is running the download code */
	kfree(rom_desc);
	kfree(ti_manuf_desc);
	return 0;
}

/*
 * Device came up with only its boot loader running; validate the on-chip
 * I2C image and, if usable, push the operational firmware over the bulk
 * pipe so the device reboots into download mode.
 */
static int do_boot_mode(struct edgeport_serial *serial,
		const struct firmware *fw)
{
	struct device *dev = &serial->serial->interface->dev;
	int status = 0;
	struct edge_ti_manuf_descriptor *ti_manuf_desc;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	dev_dbg(dev, "%s - RUNNING IN BOOT MODE\n", __func__);

	/* Configure the TI device so we can use the BULK pipes for download */
	status = config_boot_dev(serial->serial->dev);
	if (status)
		return status;

	if (le16_to_cpu(serial->serial->dev->descriptor.idVendor) !=
							USB_VENDOR_ID_ION) {
		dev_dbg(dev, "%s - VID = 0x%x\n", __func__,
			le16_to_cpu(serial->serial->dev->descriptor.idVendor));
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
		goto stayinbootmode;
	}

	/*
	 * We have an ION device (I2c Must be programmed)
	 * Determine I2C image type
	 */
	if (i2c_type_bootmode(serial))
		goto stayinbootmode;

	/* Check for ION Vendor ID and that the I2C is valid */
	if (!check_i2c_image(serial)) {
		struct ti_i2c_image_header *header;
		int i;
		__u8 cs = 0;
		__u8 *buffer;
		int buffer_size;

		/*
		 * Validate Hardware version number
		 * Read Manufacturing Descriptor from TI Based Edgeport
		 */
		ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
		if (!ti_manuf_desc)
			return -ENOMEM;

		status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
		if (status) {
			kfree(ti_manuf_desc);
			goto stayinbootmode;
		}

		/* Check for version 2 */
		if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
			dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
				__func__, ti_cpu_rev(ti_manuf_desc));
			kfree(ti_manuf_desc);
			goto stayinbootmode;
		}

		kfree(ti_manuf_desc);

		/*
		 * In order to update the I2C firmware we must change the type
		 * 2 record to type 0xF2. This will force the UMP to come up
		 * in Boot Mode. Then while in boot mode, the driver will
		 * download the latest firmware (padded to 15.5k) into the
		 * UMP ram. Finally when the device comes back up in download
		 * mode the driver will cause the new firmware to be copied
		 * from the UMP Ram to I2C and the firmware will update the
		 * record type from 0xf2 to 0x02.
		 *
		 * Do we really have to copy the whole firmware image,
		 * or could we do this in place!
		 */

		/* Allocate a 15.5k buffer + 3 byte header */
		buffer_size = (((1024 * 16) - 512) +
					sizeof(struct ti_i2c_image_header));
		buffer = kmalloc(buffer_size, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		/* Initialize the buffer to 0xff (pad the buffer) */
		memset(buffer, 0xff, buffer_size);
		memcpy(buffer, &fw->data[4], fw->size - 4);

		/* Checksum the payload only, not the image header itself. */
		for (i = sizeof(struct ti_i2c_image_header);
				i < buffer_size; i++) {
			cs = (__u8)(cs + buffer[i]);
		}

		header = (struct ti_i2c_image_header *)buffer;

		/* update length and checksum after padding */
		header->Length = cpu_to_le16((__u16)(buffer_size -
					sizeof(struct ti_i2c_image_header)));
		header->CheckSum = cs;

		/* Download the operational code */
		dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
				__func__,
				fw_hdr->major_version, fw_hdr->minor_version);
		status = download_code(serial, buffer, buffer_size);

		kfree(buffer);

		if (status) {
			dev_dbg(dev, "%s - Error downloading operational code image\n", __func__);
			return status;
		}

		/* Device will reboot */
		serial->product_info.TiMode = TI_MODE_TRANSITIONING;

		dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);

		return 1;
	}

stayinbootmode:
	/* Eprom is invalid or blank stay in boot mode */
	dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
	serial->product_info.TiMode = TI_MODE_BOOT;

	return 1;
}

/* Toggle a single UMP per-port feature (DTR, RTS, loopback, ...). */
static int ti_do_config(struct edgeport_port *port, int feature, int on)
{
	int port_number = port->port->port_number;

	on = !!on;	/* 1 or 0 not bitmask */

	return send_cmd(port->port->serial->dev,
			feature, (__u8)(UMPM_UART1_PORT + port_number),
			on, NULL, 0);
}

/* Re-apply the saved modem-control state (DTR/RTS/loopback) to the UMP. */
static int restore_mcr(struct edgeport_port *port, __u8 mcr)
{
	int status = 0;

	dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr);

	status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
	if (status)
		return status;
	status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
	if (status)
		return status;
	return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}

/* Convert TI LSR to standard
UART flags */
static __u8 map_line_status(__u8 ti_lsr)
{
	__u8 lsr = 0;

/* Translate one UMP LSR bit into the corresponding generic LSR bit. */
#define MAP_FLAG(flagUmp, flagUart) \
	if (ti_lsr & flagUmp) \
		lsr |= flagUart;

	MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR)	/* overrun */
	MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR)	/* parity error */
	MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR)	/* framing error */
	MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK)	/* break detected */
	MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL)	/* rx data available */
	MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY)	/* tx hold reg empty */

#undef MAP_FLAG

	return lsr;
}

/*
 * handle_new_msr - process a modem-status byte from the device: bump the
 * delta counters, wake tiocmiwait() sleepers, cache the line states in
 * shadow_msr, and wake the tty writer when CTS flow control re-opens.
 */
static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
{
	struct async_icount *icount;
	struct tty_struct *tty;

	dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, msr);

	if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
			EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
		icount = &edge_port->port->icount;

		/* update input line counters */
		if (msr & EDGEPORT_MSR_DELTA_CTS)
			icount->cts++;
		if (msr & EDGEPORT_MSR_DELTA_DSR)
			icount->dsr++;
		if (msr & EDGEPORT_MSR_DELTA_CD)
			icount->dcd++;
		if (msr & EDGEPORT_MSR_DELTA_RI)
			icount->rng++;
		wake_up_interruptible(&edge_port->port->port.delta_msr_wait);
	}

	/* Save the new modem status (only the line-state bits) */
	edge_port->shadow_msr = msr & 0xf0;

	tty = tty_port_tty_get(&edge_port->port->port);
	/* handle CTS flow control */
	if (tty && C_CRTSCTS(tty)) {
		if (msr & EDGEPORT_MSR_CTS)
			tty_wakeup(tty);
	}
	tty_kref_put(tty);
}

/*
 * handle_new_lsr - process a line-status byte.  When @lsr_data is set,
 * @data is the received character the status refers to and is pushed to
 * the tty; error/break counters are updated from the masked status bits.
 */
static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
		__u8 lsr, __u8 data)
{
	struct async_icount *icount;
	__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
			LSR_FRM_ERR | LSR_BREAK));

	dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);

	edge_port->shadow_lsr = lsr;

	if (new_lsr & LSR_BREAK)
		/*
		 * Parity and Framing errors only count if they
		 * occur exclusive of a break being received.
		 */
		new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);

	/* Place LSR data byte into Rx buffer */
	if (lsr_data)
		edge_tty_recv(edge_port->port, &data, 1);

	/* update input line counters */
	icount = &edge_port->port->icount;
	if (new_lsr & LSR_BREAK)
		icount->brk++;
	if (new_lsr & LSR_OVER_ERR)
		icount->overrun++;
	if (new_lsr & LSR_PAR_ERR)
		icount->parity++;
	if (new_lsr & LSR_FRM_ERR)
		icount->frame++;
}

/*
 * edge_interrupt_callback - interrupt-in URB completion handler.
 * Each 2-byte packet encodes a port number + function code in byte 0
 * and the LSR/MSR value in byte 1.  The URB is resubmitted on exit.
 */
static void edge_interrupt_callback(struct urb *urb)
{
	struct edgeport_serial *edge_serial = urb->context;
	struct usb_serial_port *port;
	struct edgeport_port *edge_port;
	struct device *dev;
	unsigned char *data = urb->transfer_buffer;
	int length = urb->actual_length;
	int port_number;
	int function;
	int retval;
	__u8 lsr;
	__u8 msr;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
			"%d\n", __func__, status);
		goto exit;
	}

	if (!length) {
		dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
		goto exit;
	}

	dev = &edge_serial->serial->dev->dev;
	usb_serial_debug_data(dev, __func__, length, data);

	/* reject short/long packets before touching data[1] */
	if (length != 2) {
		dev_dbg(dev, "%s - expecting packet of size 2, got %d\n",
			__func__, length);
		goto exit;
	}

	port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
	function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n",
		__func__, port_number, function, data[1]);

	/* bounds-check the device-supplied port index */
	if (port_number >= edge_serial->serial->num_ports) {
		dev_err(dev, "bad port number %d\n", port_number);
		goto exit;
	}

	port = edge_serial->serial->port[port_number];
	edge_port = usb_get_serial_port_data(port);
	if (!edge_port) {
		dev_dbg(dev, "%s - edge_port not found\n", __func__);
		return;
	}
	switch (function) {
	case TIUMP_INTERRUPT_CODE_LSR:
		lsr = map_line_status(data[1]);
		if (lsr & UMP_UART_LSR_DATA_MASK) {
			/*
			 * Save the
LSR event for bulk read completion routine */
			dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
				__func__, port_number, lsr);
			edge_port->lsr_event = 1;
			edge_port->lsr_mask = lsr;
		} else {
			dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
				__func__, port_number, lsr);
			handle_new_lsr(edge_port, 0, lsr, 0);
		}
		break;

	case TIUMP_INTERRUPT_CODE_MSR:	/* MSR */
		/* Copy MSR from UMP */
		msr = data[1];
		dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
			__func__, port_number, msr);
		handle_new_msr(edge_port, msr);
		break;

	default:
		dev_err(&urb->dev->dev,
			"%s - Unknown Interrupt code from UMP %x\n",
			__func__, data[1]);
		break;
	}

exit:
	/* re-arm the interrupt endpoint */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}

/*
 * edge_bulk_in_callback - bulk-in URB completion handler.  If the
 * interrupt handler flagged a pending LSR event, the first byte of the
 * buffer is the character belonging to that event; the remainder is
 * plain receive data pushed to the tty.  The URB is resubmitted while
 * the read state machine says EDGE_READ_URB_RUNNING.
 */
static void edge_bulk_in_callback(struct urb *urb)
{
	struct edgeport_port *edge_port = urb->context;
	struct device *dev = &edge_port->port->dev;
	unsigned char *data = urb->transfer_buffer;
	int retval = 0;
	int port_number;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err(&urb->dev->dev,
			"%s - nonzero read bulk status received: %d\n",
			__func__, status);
	}

	/* a stall is retried via the exit path below */
	if (status == -EPIPE)
		goto exit;

	if (status) {
		dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
		return;
	}

	port_number = edge_port->port->port_number;

	if (edge_port->lsr_event) {
		edge_port->lsr_event = 0;
		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
			__func__, port_number, edge_port->lsr_mask, *data);
		handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
		/* Adjust buffer length/pointer */
		--urb->actual_length;
		++data;
	}

	if (urb->actual_length) {
		usb_serial_debug_data(dev, __func__, urb->actual_length, data);
		if (edge_port->close_pending)
			dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
				__func__);
		else
			edge_tty_recv(edge_port->port, data,
					urb->actual_length);
		edge_port->port->icount.rx += urb->actual_length;
	}

exit:
	/* continue read unless stopped */
	spin_lock(&edge_port->ep_lock);
	if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
		retval = usb_submit_urb(urb, GFP_ATOMIC);
	else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
		edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
	spin_unlock(&edge_port->ep_lock);
	if (retval)
		dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}

/* edge_tty_recv - hand received bytes to the tty layer, logging drops */
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
		int length)
{
	int queued;

	queued = tty_insert_flip_string(&port->port, data, length);
	if (queued < length)
		dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
			__func__, length - queued);
	tty_flip_buffer_push(&port->port);
}

/*
 * edge_bulk_out_callback - bulk-out URB completion: release the write
 * urb and kick edge_send() to push any data buffered in the fifo.
 */
static void edge_bulk_out_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status = urb->status;
	struct tty_struct *tty;

	edge_port->ep_write_urb_in_use = 0;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err_console(port, "%s - nonzero write bulk status "
			"received: %d\n", __func__, status);
	}

	/* send any buffered data */
	tty = tty_port_tty_get(&port->port);
	edge_send(port, tty);
	tty_kref_put(tty);
}

/*
 * edge_open - tty open: configure the UART, open and start the port on
 * the UMP, purge its buffers, read the initial MSR, and submit the
 * shared interrupt URB (first opener) plus this port's bulk-in URB.
 */
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct edgeport_serial *edge_serial;
	struct usb_device *dev;
	struct urb *urb;
	int port_number;
	int status;
	u16 open_settings;
	u8 transaction_timeout;

	if (edge_port == NULL)
		return -ENODEV;

	port_number = port->port_number;

	dev = port->serial->dev;

	/* turn off loopback
 */
	status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
	if (status) {
		dev_err(&port->dev, "%s - cannot send clear loopback command, %d\n",
			__func__, status);
		return status;
	}

	/* set up the port settings */
	if (tty)
		edge_set_termios(tty, port, &tty->termios);

	/* open up the port */

	/* milliseconds to timeout for DMA transfer */
	transaction_timeout = 2;

	edge_port->ump_read_timeout =
				max(20, ((transaction_timeout * 3) / 2));

	/* milliseconds to timeout for DMA transfer */
	open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
			UMP_PIPE_TRANS_TIMEOUT_ENA |
			(transaction_timeout << 2));

	dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);

	/* Tell TI to open and start the port */
	status = send_cmd(dev, UMPC_OPEN_PORT,
		(u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
	if (status) {
		dev_err(&port->dev, "%s - cannot send open command, %d\n",
			__func__, status);
		return status;
	}

	/* Start the DMA? */
	status = send_cmd(dev, UMPC_START_PORT,
		(u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
	if (status) {
		dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
			__func__, status);
		return status;
	}

	/* Clear TX and RX buffers in UMP */
	status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
	if (status) {
		dev_err(&port->dev, "%s - cannot send clear buffers command, %d\n",
			__func__, status);
		return status;
	}

	/* Read Initial MSR */
	status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
				(__u16)(UMPM_UART1_PORT + port_number),
				&edge_port->shadow_msr, 1);
	if (status) {
		dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
			__func__, status);
		return status;
	}

	dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);

	/* Set Initial MCR */
	edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
	dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);

	edge_serial = edge_port->edge_serial;
	/* es_lock serialises num_ports_open and the shared interrupt urb */
	if (mutex_lock_interruptible(&edge_serial->es_lock))
		return -ERESTARTSYS;
	if (edge_serial->num_ports_open == 0) {
		/* we are the first port to open, post the interrupt urb */
		urb = edge_serial->serial->port[0]->interrupt_in_urb;
		if (!urb) {
			dev_err(&port->dev,
				"%s - no interrupt urb present, exiting\n",
				__func__);
			status = -EINVAL;
			goto release_es_lock;
		}
		urb->context = edge_serial;
		status = usb_submit_urb(urb, GFP_KERNEL);
		if (status) {
			dev_err(&port->dev,
				"%s - usb_submit_urb failed with value %d\n",
				__func__, status);
			goto release_es_lock;
		}
	}

	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync some times
	 */
	usb_clear_halt(dev, port->write_urb->pipe);
	usb_clear_halt(dev, port->read_urb->pipe);

	/* start up our bulk read urb */
	urb = port->read_urb;
	if (!urb) {
		dev_err(&port->dev, "%s - no read urb present, exiting\n",
			__func__);
		status = -EINVAL;
		goto unlink_int_urb;
	}
	edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
	urb->context = edge_port;
	status = usb_submit_urb(urb, GFP_KERNEL);
	if (status) {
		dev_err(&port->dev,
			"%s - read bulk usb_submit_urb failed with value %d\n",
			__func__, status);
		goto unlink_int_urb;
	}

	++edge_serial->num_ports_open;

	goto release_es_lock;

unlink_int_urb:
	/* we never incremented the count, so only kill our own submission */
	if (edge_port->edge_serial->num_ports_open == 0)
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
	mutex_unlock(&edge_serial->es_lock);
	return status;
}

/*
 * edge_close - tty close: stop I/O, flush the write fifo, tell the UMP
 * to close the port, and kill the shared interrupt urb when the last
 * port closes.
 */
static void edge_close(struct usb_serial_port *port)
{
	struct edgeport_serial *edge_serial;
	struct edgeport_port *edge_port;
	struct usb_serial *serial = port->serial;
	unsigned long flags;
	int port_number;

	edge_serial = usb_get_serial_data(port->serial);
	edge_port = usb_get_serial_port_data(port);
	if (edge_serial == NULL || edge_port == NULL)
		return;

	/*
	 * The bulkreadcompletion routine will check
	 * this flag and dump add read data
	 */
	edge_port->close_pending = 1;

	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
	edge_port->ep_write_urb_in_use = 0;
	spin_lock_irqsave(&edge_port->ep_lock, flags);
	kfifo_reset_out(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
	port_number = port->port_number;
	send_cmd(serial->dev, UMPC_CLOSE_PORT,
		     (__u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);

	mutex_lock(&edge_serial->es_lock);
	--edge_port->edge_serial->num_ports_open;
	if (edge_port->edge_serial->num_ports_open <= 0) {
		/* last port is now closed, let's shut down our interrupt urb */
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
		edge_port->edge_serial->num_ports_open = 0;
	}
	mutex_unlock(&edge_serial->es_lock);
	edge_port->close_pending = 0;
}

/*
 * edge_write - queue user data into the port's write fifo and kick the
 * transmitter.  Returns the number of bytes accepted.
 */
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
				const unsigned char *data, int count)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);

	if (count == 0) {
		dev_dbg(&port->dev, "%s - write request of 0 bytes\n",
			__func__);
		return 0;
	}

	if (edge_port == NULL)
		return -ENODEV;
	if (edge_port->close_pending == 1)
		return -ENODEV;

	count = kfifo_in_locked(&port->write_fifo, data, count,
							&edge_port->ep_lock);
	edge_send(port, tty);

	return count;
}

/*
 * edge_send - move one chunk from the write fifo into the bulk-out URB
 * and submit it; no-op while a write URB is already in flight.
 */
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty)
{
	int count, result;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_write_urb_in_use) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	count = kfifo_out(&port->write_fifo,
				port->write_urb->transfer_buffer,
				port->bulk_out_size);

	if (count == 0) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	edge_port->ep_write_urb_in_use = 1;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	usb_serial_debug_data(&port->dev, __func__, count,
				port->write_urb->transfer_buffer);

	/* set up our urb */
	port->write_urb->transfer_buffer_length = count;

	/* send the data out the bulk port */
	result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
	if (result) {
		dev_err_console(port,
			"%s - failed submitting write urb, error %d\n",
				__func__, result);
		edge_port->ep_write_urb_in_use = 0;
		/* TODO:
reschedule edge_send */
	} else
		edge_port->port->icount.tx += count;

	/*
	 * wakeup any process waiting for writes to complete
	 * there is now more room in the buffer for new writes
	 */
	if (tty)
		tty_wakeup(tty);
}

/* edge_write_room - report free space in the write fifo (0 if closing) */
static int edge_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int room = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;
	if (edge_port->close_pending == 1)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
	return room;
}

/* edge_chars_in_buffer - bytes still queued in the write fifo */
static int edge_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int chars = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
	return chars;
}

/* edge_tx_empty - true when the device reports its transmitter idle */
static bool edge_tx_empty(struct usb_serial_port *port)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int ret;

	ret = tx_active(edge_port);
	if (ret > 0)
		return false;

	return true;
}

/*
 * edge_throttle - tty throttle: send XOFF if software flow control is
 * in use, and stop reads (which drops RTS) for hardware flow control.
 */
static void edge_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	if (edge_port == NULL)
		return;

	/* if we are implementing XON/XOFF, send the stop character */
	if (I_IXOFF(tty)) {
		unsigned char stop_char = STOP_CHAR(tty);
		status = edge_write(tty, port, &stop_char, 1);
		if (status <= 0) {
			dev_err(&port->dev, "%s - failed to write stop character, %d\n",
				__func__, status);
		}
	}

	/*
	 * if we are implementing RTS/CTS, stop reads
	 * and the Edgeport will clear the RTS line
	 */
	if (C_CRTSCTS(tty))
		stop_read(edge_port);

}

/*
 * edge_unthrottle - tty unthrottle: send XON for software flow control
 * and restart reads (re-asserting RTS) for hardware flow control.
 */
static void edge_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	if (edge_port == NULL)
		return;

	/* if we are implementing XON/XOFF, send the start character */
	if (I_IXOFF(tty)) {
		unsigned char start_char = START_CHAR(tty);
		status = edge_write(tty, port, &start_char, 1);
		if (status <= 0) {
			dev_err(&port->dev, "%s - failed to write start character, %d\n",
				__func__, status);
		}
	}

	/*
	 * if we are implementing RTS/CTS, restart reads
	 * as the Edgeport will assert the RTS line
	 */
	if (C_CRTSCTS(tty)) {
		status = restart_read(edge_port);
		if (status)
			dev_err(&port->dev,
				"%s - read bulk usb_submit_urb failed: %d\n",
				__func__, status);
	}
}

/*
 * stop_read - request that the bulk-in completion stop resubmitting,
 * and drop RTS in the shadow MCR.  The state transition is completed
 * by edge_bulk_in_callback() under the same lock.
 */
static void stop_read(struct edgeport_port *edge_port)
{
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
		edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
	edge_port->shadow_mcr &= ~MCR_RTS;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}

/*
 * restart_read - resubmit the bulk-in urb if it was fully stopped and
 * mark the read state running again; re-asserts RTS in the shadow MCR.
 */
static int restart_read(struct edgeport_port *edge_port)
{
	struct urb *urb;
	int status = 0;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
		urb = edge_port->port->read_urb;
		status = usb_submit_urb(urb, GFP_ATOMIC);
	}
	edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
	edge_port->shadow_mcr |= MCR_RTS;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	return status;
}

/*
 * change_port_settings - translate the tty termios into a UMP uart
 * config block and send it to the device with UMPC_SET_CONFIG.
 * On allocation failure the old termios is restored.
 */
static void change_port_settings(struct tty_struct *tty,
		struct edgeport_port *edge_port, struct ktermios *old_termios)
{
	struct device *dev = &edge_port->port->dev;
	struct ump_uart_config *config;
	int baud;
	unsigned cflag;
	int status;
	int port_number = edge_port->port->port_number;

	config = kmalloc (sizeof (*config), GFP_KERNEL);
	if (!config) {
		tty->termios = *old_termios;
		return;
	}

	cflag = tty->termios.c_cflag;

	config->wFlags = 0;

	/* These flags must be set */
	config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT;
	config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR;
	config->bUartMode = (__u8)(edge_port->bUartMode);

	switch (cflag & CSIZE) {
	case CS5:
		config->bDataBits = UMP_UART_CHAR5BITS;
		dev_dbg(dev, "%s - data bits = 5\n", __func__);
		break;
	case CS6:
		config->bDataBits = UMP_UART_CHAR6BITS;
		dev_dbg(dev, "%s - data bits = 6\n", __func__);
		break;
	case CS7:
		config->bDataBits = UMP_UART_CHAR7BITS;
		dev_dbg(dev, "%s - data bits = 7\n", __func__);
		break;
	default:
	case CS8:
		config->bDataBits = UMP_UART_CHAR8BITS;
		dev_dbg(dev, "%s - data bits = 8\n", __func__);
		break;
	}

	if (cflag & PARENB) {
		if (cflag & PARODD) {
			config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
			config->bParity = UMP_UART_ODDPARITY;
			dev_dbg(dev, "%s - parity = odd\n", __func__);
		} else {
			config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
			config->bParity = UMP_UART_EVENPARITY;
			dev_dbg(dev, "%s - parity = even\n", __func__);
		}
	} else {
		config->bParity = UMP_UART_NOPARITY;
		dev_dbg(dev, "%s - parity = none\n", __func__);
	}

	if (cflag & CSTOPB) {
		config->bStopBits = UMP_UART_STOPBIT2;
		dev_dbg(dev, "%s - stop bits = 2\n", __func__);
	} else {
		config->bStopBits = UMP_UART_STOPBIT1;
		dev_dbg(dev, "%s - stop bits = 1\n", __func__);
	}

	/* figure out the flow control settings */
	if (cflag & CRTSCTS) {
		config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW;
		config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW;
		dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__);
	} else {
		dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__);
		restart_read(edge_port);
	}

	/*
	 * if we are implementing XON/XOFF, set the start and stop
	 * character in the device
	 */
	config->cXon = START_CHAR(tty);
	config->cXoff = STOP_CHAR(tty);

	/* if we are implementing INBOUND XON/XOFF */
	if (I_IXOFF(tty)) {
		config->wFlags |= UMP_MASK_UART_FLAGS_IN_X;
		dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
			__func__, config->cXon, config->cXoff);
	} else
		dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__);
	/* if we are implementing OUTBOUND XON/XOFF */
	if (I_IXON(tty)) {
		config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X;
		dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
			__func__, config->cXon, config->cXoff);
	} else
		dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__);

	/* mark/space parity is not supported by the hardware */
	tty->termios.c_cflag &= ~CMSPAR;

	/* Round the baud rate */
	baud = tty_get_baud_rate(tty);
	if (!baud) {
		/* pick a default, any default... */
		baud = 9600;
	} else
		tty_encode_baud_rate(tty, baud, baud);

	edge_port->baud_rate = baud;
	config->wBaudRate = (__u16)((461550L + baud/2) / baud);

	/* FIXME: Recompute actual baud from divisor here */

	dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud,
		config->wBaudRate);

	dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate));
	dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags);
	dev_dbg(dev, "bDataBits: %d\n", config->bDataBits);
	dev_dbg(dev, "bParity: %d\n", config->bParity);
	dev_dbg(dev, "bStopBits: %d\n", config->bStopBits);
	dev_dbg(dev, "cXon: %d\n", config->cXon);
	dev_dbg(dev, "cXoff: %d\n", config->cXoff);
	dev_dbg(dev, "bUartMode: %d\n", config->bUartMode);

	/* move the word values into big endian mode */
	cpu_to_be16s(&config->wFlags);
	cpu_to_be16s(&config->wBaudRate);

	status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG,
				(__u8)(UMPM_UART1_PORT + port_number),
				0, (__u8 *)config, sizeof(*config));
	if (status)
		dev_dbg(dev, "%s - error %d when trying to write config to device\n",
			__func__, status);
	kfree(config);
}

/* edge_set_termios - tty termios hook; delegates to change_port_settings */
static void edge_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int cflag;

	cflag = tty->termios.c_cflag;

	dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
		tty->termios.c_cflag, tty->termios.c_iflag);
	dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
		old_termios->c_cflag, old_termios->c_iflag);

	if (edge_port == NULL)
		return;
	/* change the port settings to the new ones specified */
	change_port_settings(tty, edge_port, old_termios);
}

/*
 * edge_tiocmset - update the shadow MCR under ep_lock, then push the
 * resulting DTR/RTS/loopback state to the device via restore_mcr().
 */
static int edge_tiocmset(struct tty_struct *tty,
					unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int mcr;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	mcr = edge_port->shadow_mcr;
	if (set & TIOCM_RTS)
		mcr |= MCR_RTS;
	if (set & TIOCM_DTR)
		mcr |= MCR_DTR;
	if (set & TIOCM_LOOP)
		mcr |= MCR_LOOPBACK;

	if (clear & TIOCM_RTS)
		mcr &= ~MCR_RTS;
	if (clear & TIOCM_DTR)
		mcr &= ~MCR_DTR;
	if (clear & TIOCM_LOOP)
		mcr &= ~MCR_LOOPBACK;

	edge_port->shadow_mcr = mcr;
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	restore_mcr(edge_port, mcr);
	return 0;
}

/* edge_tiocmget - report modem lines from the cached shadow MSR/MCR */
static int edge_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int result = 0;
	unsigned int msr;
	unsigned int mcr;
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	msr = edge_port->shadow_msr;
	mcr = edge_port->shadow_mcr;
	result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0)	/* 0x002 */
		| ((mcr & MCR_RTS) ? TIOCM_RTS: 0)	/* 0x004 */
		| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0)	/* 0x020 */
		| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0)	/* 0x040 */
		| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0)	/* 0x080 */
		| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0);	/* 0x100 */

	dev_dbg(&port->dev, "%s -- %x\n", __func__, result);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	return result;
}

/* get_serial_info - fill a userspace serial_struct for TIOCGSERIAL */
static int get_serial_info(struct edgeport_port *edge_port,
				struct serial_struct __user *retinfo)
{
	struct serial_struct tmp;
	unsigned cwait;

	cwait = edge_port->port->port.closing_wait;
	if (cwait != ASYNC_CLOSING_WAIT_NONE)
		cwait = jiffies_to_msecs(cwait) / 10;

	memset(&tmp, 0, sizeof(tmp));

	tmp.type		= PORT_16550A;
	tmp.line		= edge_port->port->minor;
	tmp.port		= edge_port->port->port_number;
	tmp.irq			= 0;
	tmp.xmit_fifo_size	= edge_port->port->bulk_out_size;
	tmp.baud_base		= 9600;
	tmp.close_delay		= 5*HZ;
	tmp.closing_wait	= cwait;

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}

/* edge_ioctl - only TIOCGSERIAL is handled here */
static int edge_ioctl(struct tty_struct *tty,
					unsigned int cmd, unsigned long arg)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);

	switch (cmd) {
	case TIOCGSERIAL:
		dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
		return get_serial_info(edge_port,
				(struct serial_struct __user *) arg);
	}
	return -ENOIOCTLCMD;
}

/* edge_break - assert (break_state == -1) or clear break on the UMP */
static void edge_break(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;
	int bv = 0;	/* Off */

	if (break_state == -1)
		bv = 1;	/* On */
	status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
	if (status)
		dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n",
			__func__, status);
}

/* edge_heartbeat_schedule - queue the next heartbeat if this model needs it */
static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial)
{
	if (!edge_serial->use_heartbeat)
		return;

	schedule_delayed_work(&edge_serial->heartbeat_work,
			FW_HEARTBEAT_SECS * HZ);
}

/*
 * edge_heartbeat_work - periodic firmware keep-alive: a descriptor
 * address read is enough to reset the device's firmware watchdog.
 */
static void edge_heartbeat_work(struct work_struct *work)
{
	struct edgeport_serial *serial;
	struct ti_i2c_desc *rom_desc;

	serial = container_of(work, struct edgeport_serial,
			heartbeat_work.work);

	rom_desc = kmalloc(sizeof(*rom_desc),
GFP_KERNEL); /* Descriptor address request is enough to reset the firmware timer */ if (!rom_desc || !get_descriptor_addr(serial, I2C_DESC_TYPE_ION, rom_desc)) { dev_err(&serial->serial->interface->dev, "%s - Incomplete heartbeat\n", __func__); } kfree(rom_desc); edge_heartbeat_schedule(serial); } static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; int status; u16 product_id; /* Make sure we have the required endpoints when in download mode. */ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { if (serial->num_bulk_in < serial->num_ports || serial->num_bulk_out < serial->num_ports) return -ENODEV; } /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (!edge_serial) return -ENOMEM; mutex_init(&edge_serial->es_lock); edge_serial->serial = serial; INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work); usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); if (status < 0) { kfree(edge_serial); return status; } if (status > 0) return 1; /* bind but do not register any ports */ product_id = le16_to_cpu( edge_serial->serial->dev->descriptor.idProduct); /* Currently only the EP/416 models require heartbeat support */ if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) { if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 || product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) { edge_serial->use_heartbeat = true; } } edge_heartbeat_schedule(edge_serial); return 0; } static void edge_disconnect(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); cancel_delayed_work_sync(&edge_serial->heartbeat_work); } static void edge_release(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); cancel_delayed_work_sync(&edge_serial->heartbeat_work); kfree(edge_serial); } static int edge_port_probe(struct usb_serial_port *port) { struct edgeport_port *edge_port; 
	int ret;

	edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
	if (!edge_port)
		return -ENOMEM;

	spin_lock_init(&edge_port->ep_lock);
	edge_port->port = port;
	edge_port->edge_serial = usb_get_serial_data(port->serial);
	edge_port->bUartMode = default_uart_mode;

	/* Only two UARTs per TIUMP device; anything else is a bug. */
	switch (port->port_number) {
	case 0:
		edge_port->uart_base = UMPMEM_BASE_UART1;
		edge_port->dma_address = UMPD_OEDB1_ADDRESS;
		break;
	case 1:
		edge_port->uart_base = UMPMEM_BASE_UART2;
		edge_port->dma_address = UMPD_OEDB2_ADDRESS;
		break;
	default:
		dev_err(&port->dev, "unknown port number\n");
		ret = -ENODEV;
		goto err;
	}

	dev_dbg(&port->dev,
		"%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
		__func__, port->port_number, edge_port->uart_base,
		edge_port->dma_address);

	usb_set_serial_port_data(port, edge_port);

	ret = edge_create_sysfs_attrs(port);
	if (ret)
		goto err;

	/* closing_wait module parameter is in .01 s units */
	port->port.closing_wait = msecs_to_jiffies(closing_wait * 10);
	port->port.drain_delay = 1;

	return 0;
err:
	kfree(edge_port);

	return ret;
}

/* Per-port teardown: remove the sysfs attribute and free port state. */
static int edge_port_remove(struct usb_serial_port *port)
{
	struct edgeport_port *edge_port;

	edge_port = usb_get_serial_port_data(port);
	edge_remove_sysfs_attrs(port);
	kfree(edge_port);

	return 0;
}

/* Sysfs Attributes */

/* Show the current port type (0 = RS232, see bUartMode). */
static ssize_t uart_mode_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct usb_serial_port *port = to_usb_serial_port(dev);
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);

	return sprintf(buf, "%d\n", edge_port->bUartMode);
}

/*
 * Store a new port type; values >= 256 are rejected since bUartMode is
 * a single byte.
 * NOTE(review): simple_strtoul() is deprecated in favour of kstrtouint(),
 * but switching would change behaviour for inputs with trailing garbage
 * -- confirm before converting.
 */
static ssize_t uart_mode_store(struct device *dev,
	struct device_attribute *attr, const char *valbuf, size_t count)
{
	struct usb_serial_port *port = to_usb_serial_port(dev);
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned int v = simple_strtoul(valbuf, NULL, 0);

	dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v);

	if (v < 256)
		edge_port->bUartMode = v;
	else
		dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);

	return count;
}
static DEVICE_ATTR_RW(uart_mode);

static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
	return device_create_file(&port->dev, &dev_attr_uart_mode);
}

static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
{
	device_remove_file(&port->dev, &dev_attr_uart_mode);
	return 0;
}

#ifdef CONFIG_PM
/* Suspend: quiesce the heartbeat so it cannot fire while suspended. */
static int edge_suspend(struct usb_serial *serial, pm_message_t message)
{
	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);

	cancel_delayed_work_sync(&edge_serial->heartbeat_work);

	return 0;
}

/* Resume: restart the heartbeat if this model uses it. */
static int edge_resume(struct usb_serial *serial)
{
	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);

	edge_heartbeat_schedule(edge_serial);

	return 0;
}
#endif

static struct usb_serial_driver edgeport_1port_device = {
	.driver = {
		.owner		= THIS_MODULE,
		.name		= "edgeport_ti_1",
	},
	.description		= "Edgeport TI 1 port adapter",
	.id_table		= edgeport_1port_id_table,
	.num_ports		= 1,
	.open			= edge_open,
	.close			= edge_close,
	.throttle		= edge_throttle,
	.unthrottle		= edge_unthrottle,
	.attach			= edge_startup,
	.disconnect		= edge_disconnect,
	.release		= edge_release,
	.port_probe		= edge_port_probe,
	.port_remove		= edge_port_remove,
	.ioctl			= edge_ioctl,
	.set_termios		= edge_set_termios,
	.tiocmget		= edge_tiocmget,
	.tiocmset		= edge_tiocmset,
	.tiocmiwait		= usb_serial_generic_tiocmiwait,
	.get_icount		= usb_serial_generic_get_icount,
	.write			= edge_write,
	.write_room		= edge_write_room,
	.chars_in_buffer	= edge_chars_in_buffer,
	.tx_empty		= edge_tx_empty,
	.break_ctl		= edge_break,
	.read_int_callback	= edge_interrupt_callback,
	.read_bulk_callback	= edge_bulk_in_callback,
	.write_bulk_callback	= edge_bulk_out_callback,
#ifdef CONFIG_PM
	.suspend		= edge_suspend,
	.resume			= edge_resume,
#endif
};

static struct usb_serial_driver edgeport_2port_device = {
	.driver = {
		.owner		= THIS_MODULE,
		.name		= "edgeport_ti_2",
	},
	.description		= "Edgeport TI 2 port adapter",
	.id_table		= edgeport_2port_id_table,
	.num_ports		= 2,
	.open			= edge_open,
	.close			= edge_close,
	.throttle		= edge_throttle,
	.unthrottle		= edge_unthrottle,
	.attach			= edge_startup,
	.disconnect		= edge_disconnect,
	.release		= edge_release,
	.port_probe		= edge_port_probe,
	.port_remove		= edge_port_remove,
	.ioctl			= edge_ioctl,
	.set_termios		= edge_set_termios,
	.tiocmget		= edge_tiocmget,
	.tiocmset		= edge_tiocmset,
	.tiocmiwait		= usb_serial_generic_tiocmiwait,
	.get_icount		= usb_serial_generic_get_icount,
	.write			= edge_write,
	.write_room		= edge_write_room,
	.chars_in_buffer	= edge_chars_in_buffer,
	.tx_empty		= edge_tx_empty,
	.break_ctl		= edge_break,
	.read_int_callback	= edge_interrupt_callback,
	.read_bulk_callback	= edge_bulk_in_callback,
	.write_bulk_callback	= edge_bulk_out_callback,
#ifdef CONFIG_PM
	.suspend		= edge_suspend,
	.resume			= edge_resume,
#endif
};

static struct usb_serial_driver * const serial_drivers[] = {
	&edgeport_1port_device, &edgeport_2port_device, NULL
};

module_usb_serial_driver(serial_drivers, id_table_combined);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/down3.bin");

module_param(closing_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");

module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_cpu_rev,
			"Ignore the cpu revision when connecting to a device");

module_param(default_uart_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
./CrossVul/dataset_final_sorted/CWE-191/c/bad_3345_0
crossvul-cpp_data_good_3345_0
/* * Edgeport USB Serial Converter driver * * Copyright (C) 2000-2002 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Supports the following devices: * EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/serial.h> #include <linux/swab.h> #include <linux/kfifo.h> #include <linux/ioctl.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_16654.h" #include "io_usbvend.h" #include "io_ti.h" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define EPROM_PAGE_SIZE 64 /* different hardware types */ #define HARDWARE_TYPE_930 0 #define HARDWARE_TYPE_TIUMP 1 /* IOCTL_PRIVATE_TI_GET_MODE Definitions */ #define TI_MODE_CONFIGURING 0 /* Device has not entered start device */ #define TI_MODE_BOOT 1 /* Staying in boot mode */ #define TI_MODE_DOWNLOAD 2 /* Made it to download mode */ #define TI_MODE_TRANSITIONING 3 /* * Currently in boot mode but * transitioning to download mode */ /* read urb state */ #define EDGE_READ_URB_RUNNING 0 #define EDGE_READ_URB_STOPPING 1 #define EDGE_READ_URB_STOPPED 2 #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */ /* Product information 
read from the Edgeport */
struct product_info {
	int	TiMode;			/* Current TI Mode  */
	__u8	hardware_type;		/* Type of hardware */
} __attribute__((packed));

/*
 * Edgeport firmware header
 *
 * "build_number" has been set to 0 in all three of the images I have
 * seen, and Digi Tech Support suggests that it is safe to ignore it.
 *
 * "length" is the number of bytes of actual data following the header.
 *
 * "checksum" is the low order byte resulting from adding the values of
 * all the data bytes.
 */
struct edgeport_fw_hdr {
	u8 major_version;
	u8 minor_version;
	__le16 build_number;
	__le16 length;
	u8 checksum;
} __packed;

/* Per-port driver state. */
struct edgeport_port {
	__u16 uart_base;		/* UMP memory base of this UART */
	__u16 dma_address;		/* out-endpoint descriptor block addr */
	__u8 shadow_msr;
	__u8 shadow_mcr;
	__u8 shadow_lsr;
	__u8 lsr_mask;
	__u32 ump_read_timeout;		/*
					 * Number of milliseconds the UMP will
					 * wait without data before completing
					 * a read short
					 */
	int baud_rate;
	int close_pending;
	int lsr_event;

	struct edgeport_serial	*edge_serial;
	struct usb_serial_port	*port;
	__u8 bUartMode;		/* Port type, 0: RS232, etc. */
	spinlock_t ep_lock;	/* protects the ep_* state below */
	int ep_read_urb_state;
	int ep_write_urb_in_use;
};

/* Per-device driver state. */
struct edgeport_serial {
	struct product_info product_info;
	u8 TI_I2C_Type;			/* Type of I2C in UMP */
	u8 TiReadI2C;			/*
					 * Set to TRUE if we have read the
					 * I2c in Boot Mode
					 */
	struct mutex es_lock;
	int num_ports_open;
	struct usb_serial *serial;
	struct delayed_work heartbeat_work;
	int fw_version;
	bool use_heartbeat;		/* EP/416 idle-disconnect workaround */
};


/* Devices that this driver supports */
static const struct usb_device_id edgeport_1port_id_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
	{ }
};

static const struct usb_device_id edgeport_2port_id_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
	/* The 4, 8 and 16 port devices show up as multiple 2 port devices */
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
	{ }
};

/* Devices that this driver supports */
static const struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
	{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table_combined);

/* Module parameter storage (exposed via module_param at end of file). */
static int closing_wait = EDGE_CLOSING_WAIT;
static bool ignore_cpu_rev;
static int default_uart_mode;		/* RS232 */

static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
		int length);

static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);

static void edge_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);

static int do_download_mode(struct edgeport_serial *serial,
		const struct firmware *fw);
static int do_boot_mode(struct edgeport_serial *serial,
		const struct firmware *fw);

/* sysfs attributes */
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);

/*
 * Some release of Edgeport firmware "down3.bin" after version
4.80
 * introduced code to automatically disconnect idle devices on some
 * Edgeport models after periods of inactivity, typically ~60 seconds.
 * This occurs without regard to whether ports on the device are open
 * or not.  Digi International Tech Support suggested:
 *
 * 1. Adding driver "heartbeat" code to reset the firmware timer by
 *    requesting a descriptor record every 15 seconds, which should be
 *    effective with newer firmware versions that require it, and benign
 *    with older versions that do not. In practice 40 seconds seems often
 *    enough.
 * 2. The heartbeat code is currently required only on Edgeport/416 models.
 */
#define FW_HEARTBEAT_VERSION_CUTOFF ((4 << 8) + 80)
#define FW_HEARTBEAT_SECS 40

/* Timeouts in msecs: firmware downloads take longer */
#define TI_VSEND_TIMEOUT_DEFAULT 1000
#define TI_VSEND_TIMEOUT_FW_DOWNLOAD 10000

/*
 * Synchronous vendor control-IN transfer; returns 0 on success, a
 * negative errno on failure, or -ECOMM on a short transfer.
 */
static int ti_vread_sync(struct usb_device *dev, __u8 request,
				__u16 value, __u16 index, u8 *data, int size)
{
	int status;

	status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
			(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN),
			value, index, data, size, 1000);
	if (status < 0)
		return status;
	if (status != size) {
		dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n",
			__func__, size, status);
		return -ECOMM;
	}
	return 0;
}

/*
 * Synchronous vendor control-OUT transfer with caller-supplied timeout
 * (firmware downloads use a longer one); same return convention as
 * ti_vread_sync().
 */
static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value,
		u16 index, u8 *data, int size, int timeout)
{
	int status;

	status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
			(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
			value, index, data, size, timeout);
	if (status < 0)
		return status;
	if (status != size) {
		dev_dbg(&dev->dev, "%s - wanted to write %d, but only wrote %d\n",
			__func__, size, status);
		return -ECOMM;
	}
	return 0;
}

/* Send a UMP command using the default timeout. */
static int send_cmd(struct usb_device *dev, __u8 command,
				__u8 moduleid, __u16 value, u8 *data,
				int size)
{
	return ti_vsend_sync(dev, command, value, moduleid, data, size,
			TI_VSEND_TIMEOUT_DEFAULT);
}

/* clear tx/rx buffers and fifo in TI UMP */
static int purge_port(struct usb_serial_port *port, __u16 mask)
{
	int port_number = port->port_number;

	dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);

	return send_cmd(port->serial->dev,
					UMPC_PURGE_PORT,
					(__u8)(UMPM_UART1_PORT + port_number),
					mask,
					NULL,
					0);
}

/**
 * read_download_mem - Read edgeport memory from TI chip
 * @dev: usb device pointer
 * @start_address: Device CPU address at which to read
 * @length: Length of above data
 * @address_type: Can read both XDATA and I2C
 * @buffer: pointer to input data buffer
 */
static int read_download_mem(struct usb_device *dev, int start_address,
				int length, __u8 address_type, __u8 *buffer)
{
	int status = 0;
	__u8 read_length;
	u16 be_start_address;

	dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);

	/*
	 * Read in blocks of 64 bytes
	 * (TI firmware can't handle more than 64 byte reads)
	 */
	while (length) {
		if (length > 64)
			read_length = 64;
		else
			read_length = (__u8)length;

		if (read_length > 1) {
			dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__,
				start_address, read_length);
		}

		/*
		 * NOTE: Must use swab as wIndex is sent in little-endian
		 * byte order regardless of host byte order.
		 */
		be_start_address = swab16((u16)start_address);
		status = ti_vread_sync(dev, UMPC_MEMORY_READ,
					(__u16)address_type,
					be_start_address,
					buffer, read_length);

		if (status) {
			dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
			return status;
		}

		if (read_length > 1)
			usb_serial_debug_data(&dev->dev, __func__, read_length, buffer);

		/* Update pointers/length */
		start_address += read_length;
		buffer += read_length;
		length -= read_length;
	}

	return status;
}

/* Convenience wrapper: read from the XDATA address space. */
static int read_ram(struct usb_device *dev, int start_address,
						int length, __u8 *buffer)
{
	return read_download_mem(dev, start_address, length,
					DTK_ADDR_SPACE_XDATA, buffer);
}

/* Read edgeport memory to a given block */
/* Boot-mode reads go one byte at a time through the vendor request. */
static int read_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;

	for (i = 0; i < length; i++) {
		status = ti_vread_sync(serial->serial->dev,
				UMPC_MEMORY_READ, serial->TI_I2C_Type,
				(__u16)(start_address+i), &buffer[i], 0x01);
		if (status) {
			dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n",
				__func__, status);
			return status;
		}
	}

	dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	serial->TiReadI2C = 1;

	return status;
}

/* Write given block to TI EPROM memory */
static int write_boot_mem(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status = 0;
	int i;
	u8 *temp;

	/* Must do a read before write */
	if (!serial->TiReadI2C) {
		/* one-byte heap buffer: not all platforms can DMA from stack */
		temp = kmalloc(1, GFP_KERNEL);
		if (!temp)
			return -ENOMEM;

		status = read_boot_mem(serial, 0, 1, temp);
		kfree(temp);
		if (status)
			return status;
	}

	for (i = 0; i < length; ++i) {
		status = ti_vsend_sync(serial->serial->dev,
				UMPC_MEMORY_WRITE, buffer[i],
				(u16)(i + start_address), NULL, 0,
				TI_VSEND_TIMEOUT_DEFAULT);
		if (status)
			return status;
	}

	dev_dbg(&serial->serial->dev->dev, "%s - start_sddr = %x, length = %d\n",
		__func__, start_address, length);
	usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);

	serial->TiReadI2C = 1;

	return status;
}

/* Write edgeport I2C memory to TI chip */
static int write_i2c_mem(struct edgeport_serial *serial,
		int start_address, int length, __u8 address_type, __u8 *buffer)
{
	struct device *dev = &serial->serial->dev->dev;
	int status = 0;
	int write_length;
	u16 be_start_address;

	/* We can only send a maximum of 1 aligned byte page at a time */

	/* calculate the number of bytes left in the first page */
	write_length = EPROM_PAGE_SIZE -
				(start_address & (EPROM_PAGE_SIZE - 1));

	if (write_length > length)
		write_length = length;

	dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n",
		__func__, start_address, write_length);
	usb_serial_debug_data(dev, __func__, write_length, buffer);

	/*
	 * Write first page.
	 *
	 * NOTE: Must use swab as wIndex is sent in little-endian byte order
	 * regardless of host byte order.
	 */
	be_start_address = swab16((u16)start_address);
	status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				(u16)address_type, be_start_address,
				buffer,	write_length, TI_VSEND_TIMEOUT_DEFAULT);
	if (status) {
		dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
		return status;
	}

	length -= write_length;
	start_address += write_length;
	buffer += write_length;

	/*
	 * We should be aligned now -- can write max page size bytes at a
	 * time.
	 */
	while (length) {
		if (length > EPROM_PAGE_SIZE)
			write_length = EPROM_PAGE_SIZE;
		else
			write_length = length;

		dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n",
			__func__, start_address, write_length);
		usb_serial_debug_data(dev, __func__, write_length, buffer);

		/*
		 * Write next page.
		 *
		 * NOTE: Must use swab as wIndex is sent in little-endian byte
		 * order regardless of host byte order.
		 */
		be_start_address = swab16((u16)start_address);
		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
				(u16)address_type, be_start_address, buffer,
				write_length, TI_VSEND_TIMEOUT_DEFAULT);
		if (status) {
			dev_err(dev, "%s - ERROR %d\n", __func__, status);
			return status;
		}

		length -= write_length;
		start_address += write_length;
		buffer += write_length;
	}
	return status;
}

/*
 * Examine the UMP DMA registers and LSR
 *
 * Check the MSBit of the X and Y DMA byte count registers.
 * A zero in this bit indicates that the TX DMA buffers are empty
 * then check the TX Empty bit in the UART.
 */
static int tx_active(struct edgeport_port *port)
{
	int status;
	struct out_endpoint_desc_block *oedb;
	__u8 *lsr;
	int bytes_left = 0;

	oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
	if (!oedb)
		return -ENOMEM;

	/*
	 * Sigh, that's right, just one byte, as not all platforms can
	 * do DMA from stack
	 */
	lsr = kmalloc(1, GFP_KERNEL);
	if (!lsr) {
		kfree(oedb);
		return -ENOMEM;
	}
	/* Read the DMA Count Registers */
	status = read_ram(port->port->serial->dev, port->dma_address,
						sizeof(*oedb), (void *)oedb);
	if (status)
		goto exit_is_tx_active;

	dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);

	/* and the LSR */
	status = read_ram(port->port->serial->dev,
			port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);

	if (status)
		goto exit_is_tx_active;
	dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);

	/* If either buffer has data or we are transmitting then return TRUE */
	if ((oedb->XByteCount & 0x80) != 0)
		bytes_left += 64;

	if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
		bytes_left += 1;

	/* We return Not Active if we get any kind of error */
exit_is_tx_active:
	dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);

	kfree(lsr);
	kfree(oedb);
	return bytes_left;
}

/* Sanity-check the USB configuration: exactly one interface expected. */
static int choose_config(struct usb_device *dev)
{
	/*
	 * There may be multiple configurations on this device, in which case
	 * we would need to read and parse all of them to find out which one
	 * we want. However, we just support one config at this point,
	 * configuration # 1, which is Config Descriptor 0.
	 */
	dev_dbg(&dev->dev, "%s - Number of Interfaces = %d\n", __func__,
		dev->config->desc.bNumInterfaces);
	dev_dbg(&dev->dev, "%s - MAX Power = %d\n", __func__,
		dev->config->desc.bMaxPower * 2);

	if (dev->config->desc.bNumInterfaces != 1) {
		dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n", __func__);
		return -ENODEV;
	}

	return 0;
}

/* Read ROM via the access method appropriate for the current TI mode. */
static int read_rom(struct edgeport_serial *serial,
				int start_address, int length, __u8 *buffer)
{
	int status;

	if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
		status = read_download_mem(serial->serial->dev,
					       start_address,
					       length,
					       serial->TI_I2C_Type,
					       buffer);
	} else {
		status = read_boot_mem(serial, start_address, length,
								buffer);
	}
	return status;
}

/* Write ROM via the access method appropriate for the current TI mode. */
static int write_rom(struct edgeport_serial *serial, int start_address,
						int length, __u8 *buffer)
{
	if (serial->product_info.TiMode == TI_MODE_BOOT)
		return write_boot_mem(serial, start_address, length,
								buffer);

	if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
		return write_i2c_mem(serial, start_address, length,
						serial->TI_I2C_Type, buffer);
	return -EINVAL;
}

/* Read a descriptor header from I2C based on type */
static int get_descriptor_addr(struct edgeport_serial *serial,
				int desc_type, struct ti_i2c_desc *rom_desc)
{
	int start_address;
	int status;

	/* Search for requested descriptor in I2C */
	start_address = 2;
	do {
		status = read_rom(serial,
				   start_address,
				   sizeof(struct ti_i2c_desc),
				   (__u8 *)rom_desc);
		if (status)
			return 0;

		if (rom_desc->Type == desc_type)
			return start_address;

		start_address = start_address + sizeof(struct ti_i2c_desc) +
						le16_to_cpu(rom_desc->Size);

	} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);

	return 0;
}

/* Validate descriptor checksum */
static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
{
	__u16 i;
	__u8 cs = 0;

	/* simple byte-sum checksum over the descriptor payload */
	for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
		cs = (__u8)(cs + buffer[i]);

	if (cs != rom_desc->CheckSum) {
		pr_debug("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs);
		return -EINVAL;
	}
	return 0;
}

/* Make sure that the I2C image is good */
static int check_i2c_image(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->dev->dev;
	int status = 0;
	struct ti_i2c_desc *rom_desc;
	int start_address = 2;
	__u8 *buffer;
	__u16 ttype;

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc)
		return -ENOMEM;

	buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
	if (!buffer) {
		kfree(rom_desc);
		return -ENOMEM;
	}

	/* Read the first byte (Signature0) must be 0x52 or 0x10 */
	status = read_rom(serial, 0, 1, buffer);
	if (status)
		goto out;

	if (*buffer != UMP5152 && *buffer != UMP3410) {
		dev_err(dev, "%s - invalid buffer signature\n", __func__);
		status = -ENODEV;
		goto out;
	}

	do {
		/* Validate the I2C */
		status = read_rom(serial,
				start_address,
				sizeof(struct ti_i2c_desc),
				(__u8 *)rom_desc);
		if (status)
			break;

		/* reject descriptors that would run past the I2C image */
		if ((start_address + sizeof(struct ti_i2c_desc) +
			le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
			status = -ENODEV;
			dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
			break;
		}

		dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type);

		/* Skip type 2 record */
		ttype = rom_desc->Type & 0x0f;
		if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
			&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
			/* Read the descriptor data */
			status = read_rom(serial, start_address +
						sizeof(struct ti_i2c_desc),
						le16_to_cpu(rom_desc->Size),
						buffer);
			if (status)
				break;

			status = valid_csum(rom_desc, buffer);
			if (status)
				break;
		}
		start_address = start_address + sizeof(struct ti_i2c_desc) +
						le16_to_cpu(rom_desc->Size);

	} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
				(start_address < TI_MAX_I2C_SIZE));

	if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
				(start_address > TI_MAX_I2C_SIZE))
		status = -ENODEV;

out:
	kfree(buffer);
	kfree(rom_desc);
	return status;
}

/* Locate and read the manufacturing descriptor from the I2C image. */
static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
{
	int status;
	int start_address;
	struct ti_i2c_desc *rom_desc;
	struct edge_ti_manuf_descriptor *desc;
	struct device *dev = &serial->serial->dev->dev;

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc)
		return -ENOMEM;

	start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
								rom_desc);

	if (!start_address) {
		dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n",
								__func__);
		status = -ENODEV;
		goto exit;
	}

	/* Read the descriptor data */
	status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
					le16_to_cpu(rom_desc->Size), buffer);
	if (status)
		goto exit;

	/* Verify payload checksum; status is returned to the caller. */
	status = valid_csum(rom_desc, buffer);

	desc = (struct edge_ti_manuf_descriptor *)buffer;
	dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig);
	dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version);
	dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev);
	dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts);
	dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts);
	dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts);

exit:
	kfree(rom_desc);
	return status;
}

/* Build firmware header used for firmware update */
static int build_i2c_fw_hdr(u8 *header, const struct firmware *fw)
{
	__u8 *buffer;
	int buffer_size;
	int i;
	__u8 cs = 0;
	struct ti_i2c_desc *i2c_header;
	struct ti_i2c_image_header *img_header;
	struct ti_i2c_firmware_rec *firmware_rec;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	/*
	 * In order to update the I2C firmware we must change the type 2 record
	 * to type 0xF2. This will force the UMP to come up in Boot Mode.
	 * Then while in boot mode, the driver will download the latest
	 * firmware (padded to 15.5k) into the UMP ram. And finally when the
	 * device comes back up in download mode the driver will cause the new
	 * firmware to be copied from the UMP Ram to I2C and the firmware will
	 * update the record type from 0xf2 to 0x02.
	 */

	/*
	 * Allocate a 15.5k buffer + 2 bytes for version number (Firmware
	 * Record)
	 */
	buffer_size = (((1024 * 16) - 512 ) +
			sizeof(struct ti_i2c_firmware_rec));

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Set entire image of 0xffs */
	memset(buffer, 0xff, buffer_size);

	/* Copy version number into firmware record */
	firmware_rec = (struct ti_i2c_firmware_rec *)buffer;

	firmware_rec->Ver_Major = fw_hdr->major_version;
	firmware_rec->Ver_Minor = fw_hdr->minor_version;

	/* Pointer to fw_down memory image */
	img_header = (struct ti_i2c_image_header *)&fw->data[4];

	/* NOTE(review): the copy length comes from the image header; this
	 * assumes fw->size was already validated against it -- confirm
	 * (check_fw_sanity() is a likely candidate). */
	memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
		&fw->data[4 + sizeof(struct ti_i2c_image_header)],
		le16_to_cpu(img_header->Length));

	/* Additive checksum over the whole padded image. */
	for (i=0; i < buffer_size; i++) {
		cs = (__u8)(cs + buffer[i]);
	}

	kfree(buffer);

	/* Build new header */
	i2c_header = (struct ti_i2c_desc *)header;
	firmware_rec = (struct ti_i2c_firmware_rec *)i2c_header->Data;

	i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
	i2c_header->Size = cpu_to_le16(buffer_size);
	i2c_header->CheckSum = cs;
	firmware_rec->Ver_Major = fw_hdr->major_version;
	firmware_rec->Ver_Minor = fw_hdr->minor_version;

	return 0;
}

/* Try to figure out what type of I2c we have */
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->dev->dev;
	int status;
	u8 *data;

	/* DMA-able bounce buffer for the vendor read. */
	data = kmalloc(1, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Try to read type 2 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
	if (status)
		dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
	else
		dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
	if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
		dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
		goto out;
	}

	/* Try to read type 3 */
	status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
				DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
	if (status)
		dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status);
	else
		/* NOTE(review): this is the type-3 probe but the message
		 * says "read 2" -- looks like a copy/paste typo in the
		 * debug string. */
		dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
	if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
		dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__);
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
		goto out;
	}

	/* Neither probe matched: fall back to type 2 and report failure. */
	dev_dbg(dev, "%s - Unknown\n", __func__);
	serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
	status = -ENODEV;
out:
	kfree(data);
	return status;
}

/* Synchronous bulk-out transfer on port 0's bulk-out endpoint. */
static int bulk_xfer(struct usb_serial *serial, void *buffer,
						int length, int *num_sent)
{
	int status;

	status = usb_bulk_msg(serial->dev,
			usb_sndbulkpipe(serial->dev,
				serial->port[0]->bulk_out_endpointAddress),
			buffer, length, num_sent, 1000);
	return status;
}

/* Download given firmware image to the device (IN BOOT MODE) */
static int download_code(struct edgeport_serial *serial, __u8 *image,
							int image_length)
{
	int status = 0;
	int pos;
	int transfer;
	int done;

	/* Transfer firmware image */
	for (pos = 0; pos < image_length; ) {
		/* Read the next buffer from file */
		transfer = image_length - pos;
		if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
			transfer = EDGE_FW_BULK_MAX_PACKET_SIZE;

		/* Transfer data */
		status = bulk_xfer(serial->serial, &image[pos],
							transfer, &done);
		if (status)
			break;
		/* Advance buffer pointer by what was actually sent. */
		pos += done;
	}

	return status;
}

/* FIXME!!!
 */
static int config_boot_dev(struct usb_device *dev)
{
	/* Intentionally a no-op (see FIXME above). */
	return 0;
}

/* Extract the CPU revision field from the manufacturing descriptor. */
static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
	return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}

/*
 * Sanity-check a loaded firmware image: header present, declared
 * length consistent with fw->size, and payload checksum matching.
 */
static int check_fw_sanity(struct edgeport_serial *serial,
		const struct firmware *fw)
{
	u16 length_total;
	u8 checksum = 0;
	int pos;
	struct device *dev = &serial->serial->interface->dev;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	if (fw->size < sizeof(struct edgeport_fw_hdr)) {
		dev_err(dev, "incomplete fw header\n");
		return -EINVAL;
	}

	length_total = le16_to_cpu(fw_hdr->length) +
			sizeof(struct edgeport_fw_hdr);

	if (fw->size != length_total) {
		dev_err(dev, "bad fw size (expected: %u, got: %zu)\n",
				length_total, fw->size);
		return -EINVAL;
	}

	/* 8-bit additive checksum over the payload (header excluded). */
	for (pos = sizeof(struct edgeport_fw_hdr); pos < fw->size; ++pos)
		checksum += fw->data[pos];

	if (checksum != fw_hdr->checksum) {
		dev_err(dev, "bad fw checksum (expected: 0x%x, got: 0x%x)\n",
				fw_hdr->checksum, checksum);
		return -EINVAL;
	}

	return 0;
}

/*
 * DownloadTIFirmware - Download run-time operating firmware to the TI5052
 *
 * This routine downloads the main operating code into the TI5052, using the
 * boot code already burned into E2PROM or ROM.
 */
static int download_fw(struct edgeport_serial *serial)
{
	struct device *dev = &serial->serial->interface->dev;
	int status = 0;
	struct usb_interface_descriptor *interface;
	const struct firmware *fw;
	const char *fw_name = "edgeport/down3.bin";
	struct edgeport_fw_hdr *fw_hdr;

	status = request_firmware(&fw, fw_name, dev);
	if (status) {
		dev_err(dev, "Failed to load image \"%s\" err %d\n",
				fw_name, status);
		return status;
	}

	if (check_fw_sanity(serial, fw)) {
		status = -EINVAL;
		goto out;
	}

	fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	/* If on-board version is newer, "fw_version" will be updated later.
	 */
	serial->fw_version = (fw_hdr->major_version << 8) +
			fw_hdr->minor_version;

	/*
	 * This routine is entered by both the BOOT mode and the Download mode
	 * We can determine which code is running by the reading the config
	 * descriptor and if we have only one bulk pipe it is in boot mode
	 */
	serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP;

	/* Default to type 2 i2c */
	serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;

	status = choose_config(serial->serial->dev);
	if (status)
		goto out;

	interface = &serial->serial->interface->cur_altsetting->desc;
	/* NOTE(review): this check can never fire -- "interface" is the
	 * address of a struct member and is therefore never NULL. Dead
	 * code; candidate for removal upstream. */
	if (!interface) {
		dev_err(dev, "%s - no interface set, error!\n", __func__);
		status = -ENODEV;
		goto out;
	}

	/*
	 * Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING
	 * if we have more than one endpoint we are definitely in download
	 * mode
	 */
	if (interface->bNumEndpoints > 1) {
		serial->product_info.TiMode = TI_MODE_DOWNLOAD;
		status = do_download_mode(serial, fw);
	} else {
		/* Otherwise we will remain in configuring mode */
		serial->product_info.TiMode = TI_MODE_CONFIGURING;
		status = do_boot_mode(serial, fw);
	}

out:
	release_firmware(fw);
	return status;
}

/*
 * Device is already running download-mode firmware: validate the I2C
 * image and, if the bundled firmware is newer, trigger an update.
 */
static int do_download_mode(struct edgeport_serial *serial,
		const struct firmware *fw)
{
	struct device *dev = &serial->serial->interface->dev;
	int status = 0;
	int start_address;
	struct edge_ti_manuf_descriptor *ti_manuf_desc;
	int download_cur_ver;
	int download_new_ver;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
	struct ti_i2c_desc *rom_desc;

	dev_dbg(dev, "%s - RUNNING IN DOWNLOAD MODE\n", __func__);

	status = check_i2c_image(serial);
	if (status) {
		dev_dbg(dev, "%s - DOWNLOAD MODE -- BAD I2C\n", __func__);
		return status;
	}

	/*
	 * Validate Hardware version number
	 * Read Manufacturing Descriptor from TI Based Edgeport
	 */
	ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
	if (!ti_manuf_desc)
		return -ENOMEM;

	status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
	if (status) {
		kfree(ti_manuf_desc);
		return status;
	}

	/* Check version number of
	   ION descriptor */
	if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
		dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
			__func__, ti_cpu_rev(ti_manuf_desc));
		kfree(ti_manuf_desc);
		return -EINVAL;
	}

	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
	if (!rom_desc) {
		kfree(ti_manuf_desc);
		return -ENOMEM;
	}

	/* Search for type 2 record (firmware record) */
	start_address = get_descriptor_addr(serial,
			I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
	if (start_address != 0) {
		struct ti_i2c_firmware_rec *firmware_version;
		u8 *record;

		dev_dbg(dev, "%s - Found Type FIRMWARE (Type 2) record\n",
				__func__);

		firmware_version = kmalloc(sizeof(*firmware_version),
							GFP_KERNEL);
		if (!firmware_version) {
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			return -ENOMEM;
		}

		/*
		 * Validate version number
		 * Read the descriptor data
		 */
		status = read_rom(serial, start_address +
				sizeof(struct ti_i2c_desc),
				sizeof(struct ti_i2c_firmware_rec),
				(__u8 *)firmware_version);
		if (status) {
			kfree(firmware_version);
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			return status;
		}

		/*
		 * Check version number of download with current
		 * version in I2c
		 */
		download_cur_ver = (firmware_version->Ver_Major << 8) +
				   (firmware_version->Ver_Minor);
		download_new_ver = (fw_hdr->major_version << 8) +
				   (fw_hdr->minor_version);

		dev_dbg(dev, "%s - >> FW Versions Device %d.%d Driver %d.%d\n",
			__func__, firmware_version->Ver_Major,
			firmware_version->Ver_Minor,
			fw_hdr->major_version, fw_hdr->minor_version);

		/*
		 * Check if we have an old version in the I2C and
		 * update if necessary
		 */
		if (download_cur_ver < download_new_ver) {
			dev_dbg(dev, "%s - Update I2C dld from %d.%d to %d.%d\n",
				__func__,
				firmware_version->Ver_Major,
				firmware_version->Ver_Minor,
				fw_hdr->major_version,
				fw_hdr->minor_version);

			/* 1-byte DMA-able buffer for the record type. */
			record = kmalloc(1, GFP_KERNEL);
			if (!record) {
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}
			/*
			 * In order to update the I2C firmware we must
			 * change the type 2 record to type 0xF2. This
			 * will force the UMP to come up in Boot Mode.
			 * Then while in boot mode, the driver will
			 * download the latest firmware (padded to
			 * 15.5k) into the UMP ram. Finally when the
			 * device comes back up in download mode the
			 * driver will cause the new firmware to be
			 * copied from the UMP Ram to I2C and the
			 * firmware will update the record type from
			 * 0xf2 to 0x02.
			 */
			*record = I2C_DESC_TYPE_FIRMWARE_BLANK;

			/*
			 * Change the I2C Firmware record type to
			 * 0xf2 to trigger an update
			 */
			status = write_rom(serial, start_address,
					sizeof(*record), record);
			if (status) {
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}

			/*
			 * verify the write -- must do this in order
			 * for write to complete before we do the
			 * hardware reset
			 */
			status = read_rom(serial, start_address,
					sizeof(*record), record);
			if (status) {
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}

			if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
				dev_err(dev, "%s - error resetting device\n",
						__func__);
				kfree(record);
				kfree(firmware_version);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENODEV;
			}

			dev_dbg(dev, "%s - HARDWARE RESET\n", __func__);

			/* Reset UMP -- Back to BOOT MODE */
			status = ti_vsend_sync(serial->serial->dev,
					UMPC_HARDWARE_RESET,
					0, 0, NULL, 0,
					TI_VSEND_TIMEOUT_DEFAULT);

			dev_dbg(dev, "%s - HARDWARE RESET return %d\n",
					__func__, status);

			/* return an error on purpose.
			 */
			kfree(record);
			kfree(firmware_version);
			kfree(rom_desc);
			kfree(ti_manuf_desc);
			/* -ENODEV: the device is rebooting and will
			 * re-enumerate. */
			return -ENODEV;
		}
		/* Same or newer fw version is already loaded */
		serial->fw_version = download_cur_ver;
		kfree(firmware_version);
	}
	/* Search for type 0xF2 record (firmware blank record) */
	else {
		start_address = get_descriptor_addr(serial,
				I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc);
		if (start_address != 0) {
#define HEADER_SIZE	(sizeof(struct ti_i2c_desc) + \
					sizeof(struct ti_i2c_firmware_rec))
			__u8 *header;
			__u8 *vheader;

			header = kmalloc(HEADER_SIZE, GFP_KERNEL);
			if (!header) {
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}

			vheader = kmalloc(HEADER_SIZE, GFP_KERNEL);
			if (!vheader) {
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -ENOMEM;
			}

			dev_dbg(dev, "%s - Found Type BLANK FIRMWARE (Type F2) record\n",
					__func__);

			/*
			 * In order to update the I2C firmware we must change
			 * the type 2 record to type 0xF2. This will force the
			 * UMP to come up in Boot Mode. Then while in boot
			 * mode, the driver will download the latest firmware
			 * (padded to 15.5k) into the UMP ram. Finally when the
			 * device comes back up in download mode the driver
			 * will cause the new firmware to be copied from the
			 * UMP Ram to I2C and the firmware will update the
			 * record type from 0xf2 to 0x02.
			 */
			status = build_i2c_fw_hdr(header, fw);
			if (status) {
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			/*
			 * Update I2C with type 0xf2 record with correct
			 * size and checksum
			 */
			status = write_rom(serial,
					start_address,
					HEADER_SIZE,
					header);
			if (status) {
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			/*
			 * verify the write -- must do this in order for
			 * write to complete before we do the hardware reset
			 */
			status = read_rom(serial, start_address,
						HEADER_SIZE, vheader);

			if (status) {
				dev_dbg(dev, "%s - can't read header back\n",
						__func__);
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}
			if (memcmp(vheader, header, HEADER_SIZE)) {
				dev_dbg(dev, "%s - write download record failed\n",
						__func__);
				kfree(vheader);
				kfree(header);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return -EINVAL;
			}

			kfree(vheader);
			kfree(header);

			dev_dbg(dev, "%s - Start firmware update\n", __func__);

			/* Tell firmware to copy download image into I2C */
			status = ti_vsend_sync(serial->serial->dev,
					UMPC_COPY_DNLD_TO_I2C,
					0, 0, NULL, 0,
					TI_VSEND_TIMEOUT_FW_DOWNLOAD);

			dev_dbg(dev, "%s - Update complete 0x%x\n", __func__,
					status);
			if (status) {
				dev_err(dev,
					"%s - UMPC_COPY_DNLD_TO_I2C failed\n",
					__func__);
				kfree(rom_desc);
				kfree(ti_manuf_desc);
				return status;
			}
		}
	}

	/* The device is running the download code */
	kfree(rom_desc);
	kfree(ti_manuf_desc);
	return 0;
}

/*
 * Device came up in boot mode: if it is a known ION device with a valid
 * I2C image, download the operational firmware (the device then reboots);
 * otherwise stay in boot mode.
 */
static int do_boot_mode(struct edgeport_serial *serial,
		const struct firmware *fw)
{
	struct device *dev = &serial->serial->interface->dev;
	int status = 0;
	struct edge_ti_manuf_descriptor *ti_manuf_desc;
	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;

	dev_dbg(dev, "%s - RUNNING IN BOOT MODE\n", __func__);

	/* Configure the TI device so we can use the BULK pipes for download */
	status = config_boot_dev(serial->serial->dev);
	if (status)
		return status;

	if (le16_to_cpu(serial->serial->dev->descriptor.idVendor) !=
			USB_VENDOR_ID_ION) {
		dev_dbg(dev, "%s - VID = 0x%x\n", __func__,
			le16_to_cpu(serial->serial->dev->descriptor.idVendor));
		serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
		goto stayinbootmode;
	}

	/*
	 * We have an ION device (I2c Must be programmed)
	 * Determine I2C image type
	 */
	if (i2c_type_bootmode(serial))
		goto stayinbootmode;

	/* Check for ION Vendor ID and that the I2C is valid */
	if (!check_i2c_image(serial)) {
		struct ti_i2c_image_header *header;
		int i;
		__u8 cs = 0;
		__u8 *buffer;
		int buffer_size;

		/*
		 * Validate Hardware version number
		 * Read Manufacturing Descriptor from TI Based Edgeport
		 */
		ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
		if (!ti_manuf_desc)
			return -ENOMEM;

		status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
		if (status) {
			kfree(ti_manuf_desc);
			goto stayinbootmode;
		}

		/* Check for version 2 */
		if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
			dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
				__func__, ti_cpu_rev(ti_manuf_desc));
			kfree(ti_manuf_desc);
			goto stayinbootmode;
		}

		kfree(ti_manuf_desc);

		/*
		 * In order to update the I2C firmware we must change the type
		 * 2 record to type 0xF2. This will force the UMP to come up
		 * in Boot Mode. Then while in boot mode, the driver will
		 * download the latest firmware (padded to 15.5k) into the
		 * UMP ram. Finally when the device comes back up in download
		 * mode the driver will cause the new firmware to be copied
		 * from the UMP Ram to I2C and the firmware will update the
		 * record type from 0xf2 to 0x02.
		 *
		 * Do we really have to copy the whole firmware image,
		 * or could we do this in place!
		 */

		/* Allocate a 15.5k buffer + 3 byte header */
		buffer_size = (((1024 * 16) - 512) +
				sizeof(struct ti_i2c_image_header));
		buffer = kmalloc(buffer_size, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		/* Initialize the buffer to 0xff (pad the buffer) */
		memset(buffer, 0xff, buffer_size);
		/* NOTE(review): assumes fw->size - 4 <= buffer_size;
		 * presumably guaranteed by check_fw_sanity() plus the fixed
		 * image size -- confirm. */
		memcpy(buffer, &fw->data[4], fw->size - 4);

		/* Checksum everything after the image header. */
		for (i = sizeof(struct ti_i2c_image_header);
				i < buffer_size; i++) {
			cs = (__u8)(cs + buffer[i]);
		}

		header = (struct ti_i2c_image_header *)buffer;

		/* update length and checksum after padding */
		header->Length = cpu_to_le16((__u16)(buffer_size -
					sizeof(struct ti_i2c_image_header)));
		header->CheckSum = cs;

		/* Download the operational code */
		dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
				__func__,
				fw_hdr->major_version, fw_hdr->minor_version);
		status = download_code(serial, buffer, buffer_size);

		kfree(buffer);

		if (status) {
			dev_dbg(dev, "%s - Error downloading operational code image\n", __func__);
			return status;
		}

		/* Device will reboot */
		serial->product_info.TiMode = TI_MODE_TRANSITIONING;

		dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);

		return 1;
	}

stayinbootmode:
	/* Eprom is invalid or blank stay in boot mode */
	dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
	serial->product_info.TiMode = TI_MODE_BOOT;

	return 1;
}

/* Set or clear one per-port UART feature via a vendor command. */
static int ti_do_config(struct edgeport_port *port, int feature, int on)
{
	int port_number = port->port->port_number;

	on = !!on;	/* 1 or 0 not bitmask */

	return send_cmd(port->port->serial->dev,
			feature, (__u8)(UMPM_UART1_PORT + port_number),
			on, NULL, 0);
}

/* Re-apply DTR/RTS/loopback from a saved MCR shadow value. */
static int restore_mcr(struct edgeport_port *port, __u8 mcr)
{
	int status = 0;

	dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr);

	status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
	if (status)
		return status;
	status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
	if (status)
		return status;
	return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}

/* Convert TI LSR to standard
   UART flags */
static __u8 map_line_status(__u8 ti_lsr)
{
	__u8 lsr = 0;

#define MAP_FLAG(flagUmp, flagUart)	\
	if (ti_lsr & flagUmp)		\
		lsr |= flagUart;

	MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR)	/* overrun */
	MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR)	/* parity error */
	MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR)	/* framing error */
	MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK)	/* break detected */
	MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL)	/* rx data available */
	MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY)	/* tx hold reg empty */

#undef MAP_FLAG

	return lsr;
}

/* Process a modem-status change: bump counters, wake waiters, save MSR. */
static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
{
	struct async_icount *icount;
	struct tty_struct *tty;

	dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, msr);

	if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
			EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
		icount = &edge_port->port->icount;

		/* update input line counters */
		if (msr & EDGEPORT_MSR_DELTA_CTS)
			icount->cts++;
		if (msr & EDGEPORT_MSR_DELTA_DSR)
			icount->dsr++;
		if (msr & EDGEPORT_MSR_DELTA_CD)
			icount->dcd++;
		if (msr & EDGEPORT_MSR_DELTA_RI)
			icount->rng++;
		wake_up_interruptible(&edge_port->port->port.delta_msr_wait);
	}

	/* Save the new modem status */
	edge_port->shadow_msr = msr & 0xf0;

	tty = tty_port_tty_get(&edge_port->port->port);
	/* handle CTS flow control */
	if (tty && C_CRTSCTS(tty)) {
		if (msr & EDGEPORT_MSR_CTS)
			tty_wakeup(tty);
	}
	tty_kref_put(tty);
}

/*
 * Process a line-status change. @lsr_data non-zero means @data carries
 * the byte that accompanied the LSR event and should reach the tty.
 */
static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
							__u8 lsr, __u8 data)
{
	struct async_icount *icount;
	__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
						LSR_FRM_ERR | LSR_BREAK));

	dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);

	edge_port->shadow_lsr = lsr;

	if (new_lsr & LSR_BREAK)
		/*
		 * Parity and Framing errors only count if they
		 * occur exclusive of a break being received.
		 */
		new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);

	/* Place LSR data byte into Rx buffer */
	if (lsr_data)
		edge_tty_recv(edge_port->port, &data, 1);

	/* update input line counters */
	icount = &edge_port->port->icount;
	if (new_lsr & LSR_BREAK)
		icount->brk++;
	if (new_lsr & LSR_OVER_ERR)
		icount->overrun++;
	if (new_lsr & LSR_PAR_ERR)
		icount->parity++;
	if (new_lsr & LSR_FRM_ERR)
		icount->frame++;
}

/*
 * Interrupt-in completion handler: decodes the 2-byte UMP status packet
 * (port + function in byte 0, LSR/MSR value in byte 1) and resubmits.
 */
static void edge_interrupt_callback(struct urb *urb)
{
	struct edgeport_serial *edge_serial = urb->context;
	struct usb_serial_port *port;
	struct edgeport_port *edge_port;
	struct device *dev;
	unsigned char *data = urb->transfer_buffer;
	int length = urb->actual_length;
	int port_number;
	int function;
	int retval;
	__u8 lsr;
	__u8 msr;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
			"%d\n", __func__, status);
		goto exit;
	}

	if (!length) {
		dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
		goto exit;
	}

	dev = &edge_serial->serial->dev->dev;
	usb_serial_debug_data(dev, __func__, length, data);

	if (length != 2) {
		dev_dbg(dev, "%s - expecting packet of size 2, got %d\n",
				__func__, length);
		goto exit;
	}

	port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
	function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
		port_number, function, data[1]);

	/* Guard against a malicious/buggy device reporting an
	 * out-of-range port number. */
	if (port_number >= edge_serial->serial->num_ports) {
		dev_err(dev, "bad port number %d\n", port_number);
		goto exit;
	}

	port = edge_serial->serial->port[port_number];
	edge_port = usb_get_serial_port_data(port);
	if (!edge_port) {
		dev_dbg(dev, "%s - edge_port not found\n", __func__);
		return;
	}
	switch (function) {
	case TIUMP_INTERRUPT_CODE_LSR:
		lsr = map_line_status(data[1]);
		if (lsr & UMP_UART_LSR_DATA_MASK) {
			/*
			 * Save the
			 * LSR event for bulk read completion routine
			 */
			dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
				__func__, port_number, lsr);
			edge_port->lsr_event = 1;
			edge_port->lsr_mask = lsr;
		} else {
			dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
				__func__, port_number, lsr);
			handle_new_lsr(edge_port, 0, lsr, 0);
		}
		break;

	case TIUMP_INTERRUPT_CODE_MSR:	/* MSR */
		/* Copy MSR from UMP */
		msr = data[1];
		dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
			__func__, port_number, msr);
		handle_new_msr(edge_port, msr);
		break;

	default:
		/* NOTE(review): this logs data[1] (the payload byte);
		 * arguably the unknown "function" code would be the more
		 * useful value to report -- confirm intent. */
		dev_err(&urb->dev->dev,
			"%s - Unknown Interrupt code from UMP %x\n",
			__func__, data[1]);
		break;

	}

exit:
	/* Resubmit so we keep receiving status packets. */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&urb->dev->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}

/*
 * Bulk-in completion handler: optionally consumes a leading LSR data
 * byte (flagged by the interrupt handler), pushes the rest to the tty,
 * and resubmits unless reads have been stopped.
 */
static void edge_bulk_in_callback(struct urb *urb)
{
	struct edgeport_port *edge_port = urb->context;
	struct device *dev = &edge_port->port->dev;
	unsigned char *data = urb->transfer_buffer;
	int retval = 0;
	int port_number;
	int status = urb->status;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err(&urb->dev->dev,
			"%s - nonzero read bulk status received: %d\n",
			__func__, status);
	}

	if (status == -EPIPE)
		goto exit;

	if (status) {
		dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
		return;
	}

	port_number = edge_port->port->port_number;

	/* Only consume the LSR byte if the urb actually carried data. */
	if (urb->actual_length > 0 && edge_port->lsr_event) {
		edge_port->lsr_event = 0;
		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
			__func__, port_number, edge_port->lsr_mask, *data);
		handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
		/* Adjust buffer length/pointer */
		--urb->actual_length;
		++data;
	}

	if (urb->actual_length) {
		usb_serial_debug_data(dev, __func__, urb->actual_length, data);
		if (edge_port->close_pending)
			dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
								__func__);
		else
			edge_tty_recv(edge_port->port, data,
					urb->actual_length);
		edge_port->port->icount.rx += urb->actual_length;
	}

exit:
	/* continue read unless stopped */
	spin_lock(&edge_port->ep_lock);
	if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
		retval = usb_submit_urb(urb, GFP_ATOMIC);
	else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
		edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
	spin_unlock(&edge_port->ep_lock);
	if (retval)
		dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
}

/* Hand received bytes to the tty layer; warn if the flip buffer drops any. */
static void edge_tty_recv(struct usb_serial_port *port, unsigned char *data,
		int length)
{
	int queued;

	queued = tty_insert_flip_string(&port->port, data, length);
	if (queued < length)
		dev_err(&port->dev, "%s - dropping data, %d bytes lost\n",
			__func__, length - queued);
	tty_flip_buffer_push(&port->port);
}

/* Bulk-out completion handler: mark the urb free and push queued data. */
static void edge_bulk_out_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status = urb->status;
	struct tty_struct *tty;

	edge_port->ep_write_urb_in_use = 0;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_err_console(port, "%s - nonzero write bulk status "
			"received: %d\n", __func__, status);
	}

	/* send any buffered data */
	tty = tty_port_tty_get(&port->port);
	edge_send(port, tty);
	tty_kref_put(tty);
}

static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	struct edgeport_serial *edge_serial;
	struct usb_device *dev;
	struct urb *urb;
	int port_number;
	int status;
	u16 open_settings;
	u8 transaction_timeout;

	if (edge_port == NULL)
		return -ENODEV;

	port_number = port->port_number;

	dev =
		port->serial->dev;

	/* turn off loopback */
	status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
	if (status) {
		dev_err(&port->dev,
				"%s - cannot send clear loopback command, %d\n",
			__func__, status);
		return status;
	}

	/* set up the port settings */
	if (tty)
		edge_set_termios(tty, port, &tty->termios);

	/* open up the port */

	/* milliseconds to timeout for DMA transfer */
	transaction_timeout = 2;

	edge_port->ump_read_timeout =
				max(20, ((transaction_timeout * 3) / 2));

	/* milliseconds to timeout for DMA transfer */
	/* NOTE(review): open_settings is u16 but the expression is cast
	 * to (u8), truncating any bits above 0xff -- confirm this is
	 * intentional (mainline uses (u16) here). */
	open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
			UMP_PIPE_TRANS_TIMEOUT_ENA |
			(transaction_timeout << 2));

	dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);

	/* Tell TI to open and start the port */
	status = send_cmd(dev, UMPC_OPEN_PORT,
		(u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
	if (status) {
		dev_err(&port->dev, "%s - cannot send open command, %d\n",
							__func__, status);
		return status;
	}

	/* Start the DMA? */
	status = send_cmd(dev, UMPC_START_PORT,
		(u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
	if (status) {
		dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
							__func__, status);
		return status;
	}

	/* Clear TX and RX buffers in UMP */
	status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
	if (status) {
		dev_err(&port->dev,
			"%s - cannot send clear buffers command, %d\n",
			__func__, status);
		return status;
	}

	/* Read Initial MSR */
	status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
				(__u16)(UMPM_UART1_PORT + port_number),
				&edge_port->shadow_msr, 1);
	if (status) {
		dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
							__func__, status);
		return status;
	}

	dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);

	/* Set Initial MCR */
	edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
	dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);

	edge_serial = edge_port->edge_serial;
	if (mutex_lock_interruptible(&edge_serial->es_lock))
		return -ERESTARTSYS;
	if (edge_serial->num_ports_open == 0) {
		/* we are the first
		   port to open, post the interrupt urb */
		urb = edge_serial->serial->port[0]->interrupt_in_urb;
		if (!urb) {
			dev_err(&port->dev,
				"%s - no interrupt urb present, exiting\n",
				__func__);
			status = -EINVAL;
			goto release_es_lock;
		}
		urb->context = edge_serial;
		status = usb_submit_urb(urb, GFP_KERNEL);
		if (status) {
			dev_err(&port->dev,
				"%s - usb_submit_urb failed with value %d\n",
					__func__, status);
			goto release_es_lock;
		}
	}

	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync some times
	 */
	usb_clear_halt(dev, port->write_urb->pipe);
	usb_clear_halt(dev, port->read_urb->pipe);

	/* start up our bulk read urb */
	urb = port->read_urb;
	if (!urb) {
		dev_err(&port->dev, "%s - no read urb present, exiting\n",
								__func__);
		status = -EINVAL;
		goto unlink_int_urb;
	}
	edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
	urb->context = edge_port;
	status = usb_submit_urb(urb, GFP_KERNEL);
	if (status) {
		dev_err(&port->dev,
			"%s - read bulk usb_submit_urb failed with value %d\n",
				__func__, status);
		goto unlink_int_urb;
	}

	++edge_serial->num_ports_open;

	goto release_es_lock;

unlink_int_urb:
	/* If we were the first opener, also take down the interrupt urb. */
	if (edge_port->edge_serial->num_ports_open == 0)
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
	mutex_unlock(&edge_serial->es_lock);
	return status;
}

static void edge_close(struct usb_serial_port *port)
{
	struct edgeport_serial *edge_serial;
	struct edgeport_port *edge_port;
	struct usb_serial *serial = port->serial;
	unsigned long flags;
	int port_number;

	edge_serial = usb_get_serial_data(port->serial);
	edge_port = usb_get_serial_port_data(port);
	if (edge_serial == NULL || edge_port == NULL)
		return;

	/*
	 * The bulkreadcompletion routine will check
	 * this flag and dump add read data
	 */
	edge_port->close_pending = 1;

	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
	edge_port->ep_write_urb_in_use = 0;
	spin_lock_irqsave(&edge_port->ep_lock, flags);
	kfifo_reset_out(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
	port_number = port->port_number;
	send_cmd(serial->dev, UMPC_CLOSE_PORT,
		     (__u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);

	mutex_lock(&edge_serial->es_lock);
	--edge_port->edge_serial->num_ports_open;
	if (edge_port->edge_serial->num_ports_open <= 0) {
		/* last port is now closed, let's shut down our interrupt urb */
		usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
		edge_port->edge_serial->num_ports_open = 0;
	}
	mutex_unlock(&edge_serial->es_lock);
	edge_port->close_pending = 0;
}

/* Queue data into the write fifo and kick off a bulk-out transfer. */
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
				const unsigned char *data, int count)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);

	if (count == 0) {
		dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
		return 0;
	}

	if (edge_port == NULL)
		return -ENODEV;
	if (edge_port->close_pending == 1)
		return -ENODEV;

	count = kfifo_in_locked(&port->write_fifo, data, count,
							&edge_port->ep_lock);
	edge_send(port, tty);

	/* Return the number of bytes actually queued. */
	return count;
}

/* Drain the write fifo into the (single) bulk-out urb if it is free. */
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty)
{
	int count, result;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&edge_port->ep_lock, flags);

	if (edge_port->ep_write_urb_in_use) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	count = kfifo_out(&port->write_fifo,
				port->write_urb->transfer_buffer,
				port->bulk_out_size);

	if (count == 0) {
		spin_unlock_irqrestore(&edge_port->ep_lock, flags);
		return;
	}

	edge_port->ep_write_urb_in_use = 1;

	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	usb_serial_debug_data(&port->dev, __func__, count,
				port->write_urb->transfer_buffer);

	/* set up our urb */
	port->write_urb->transfer_buffer_length = count;

	/* send the data out the bulk port */
	result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
	if (result) {
		dev_err_console(port,
			"%s - failed submitting write urb, error %d\n",
				__func__, result);
		edge_port->ep_write_urb_in_use = 0;
		/* TODO: reschedule edge_send */
	} else
		edge_port->port->icount.tx += count;

	/*
	 * wakeup any process waiting for writes to complete
	 * there is now more room in the buffer for new writes
	 */
	if (tty)
		tty_wakeup(tty);
}

static int edge_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int room = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;
	if (edge_port->close_pending == 1)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
	return room;
}

static int edge_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int chars = 0;
	unsigned long flags;

	if (edge_port == NULL)
		return 0;

	spin_lock_irqsave(&edge_port->ep_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&edge_port->ep_lock, flags);

	dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
	return chars;
}

/* True when the device reports no transmit activity. */
static bool edge_tx_empty(struct usb_serial_port *port)
{
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int ret;

	ret = tx_active(edge_port);
	if (ret > 0)
		return false;

	/* NOTE(review): a negative (error) return from tx_active() is
	 * treated the same as "empty" -- confirm that is intended. */
	return true;
}

static void edge_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct edgeport_port *edge_port = usb_get_serial_port_data(port);
	int status;

	if (edge_port == NULL)
		return;

	/* if we are implementing XON/XOFF, send the stop character */
	if (I_IXOFF(tty)) {
		unsigned char stop_char = STOP_CHAR(tty);
		status = edge_write(tty, port, &stop_char, 1);
		if (status <= 0) {
			dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status);
		}
	}

	/*
	 * if we are implementing RTS/CTS, stop reads
	 * and the Edgeport will
clear the RTS line */ if (C_CRTSCTS(tty)) stop_read(edge_port); } static void edge_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; if (edge_port == NULL) return; /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = edge_write(tty, port, &start_char, 1); if (status <= 0) { dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status); } } /* * if we are implementing RTS/CTS, restart reads * are the Edgeport will assert the RTS line */ if (C_CRTSCTS(tty)) { status = restart_read(edge_port); if (status) dev_err(&port->dev, "%s - read bulk usb_submit_urb failed: %d\n", __func__, status); } } static void stop_read(struct edgeport_port *edge_port) { unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING; edge_port->shadow_mcr &= ~MCR_RTS; spin_unlock_irqrestore(&edge_port->ep_lock, flags); } static int restart_read(struct edgeport_port *edge_port) { struct urb *urb; int status = 0; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) { urb = edge_port->port->read_urb; status = usb_submit_urb(urb, GFP_ATOMIC); } edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING; edge_port->shadow_mcr |= MCR_RTS; spin_unlock_irqrestore(&edge_port->ep_lock, flags); return status; } static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, struct ktermios *old_termios) { struct device *dev = &edge_port->port->dev; struct ump_uart_config *config; int baud; unsigned cflag; int status; int port_number = edge_port->port->port_number; config = kmalloc (sizeof (*config), GFP_KERNEL); if (!config) { tty->termios = *old_termios; return; } cflag = 
tty->termios.c_cflag; config->wFlags = 0; /* These flags must be set */ config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT; config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR; config->bUartMode = (__u8)(edge_port->bUartMode); switch (cflag & CSIZE) { case CS5: config->bDataBits = UMP_UART_CHAR5BITS; dev_dbg(dev, "%s - data bits = 5\n", __func__); break; case CS6: config->bDataBits = UMP_UART_CHAR6BITS; dev_dbg(dev, "%s - data bits = 6\n", __func__); break; case CS7: config->bDataBits = UMP_UART_CHAR7BITS; dev_dbg(dev, "%s - data bits = 7\n", __func__); break; default: case CS8: config->bDataBits = UMP_UART_CHAR8BITS; dev_dbg(dev, "%s - data bits = 8\n", __func__); break; } if (cflag & PARENB) { if (cflag & PARODD) { config->wFlags |= UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_ODDPARITY; dev_dbg(dev, "%s - parity = odd\n", __func__); } else { config->wFlags |= UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_EVENPARITY; dev_dbg(dev, "%s - parity = even\n", __func__); } } else { config->bParity = UMP_UART_NOPARITY; dev_dbg(dev, "%s - parity = none\n", __func__); } if (cflag & CSTOPB) { config->bStopBits = UMP_UART_STOPBIT2; dev_dbg(dev, "%s - stop bits = 2\n", __func__); } else { config->bStopBits = UMP_UART_STOPBIT1; dev_dbg(dev, "%s - stop bits = 1\n", __func__); } /* figure out the flow control settings */ if (cflag & CRTSCTS) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW; config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW; dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__); } else { dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__); restart_read(edge_port); } /* * if we are implementing XON/XOFF, set the start and stop * character in the device */ config->cXon = START_CHAR(tty); config->cXoff = STOP_CHAR(tty); /* if we are implementing INBOUND XON/XOFF */ if (I_IXOFF(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_IN_X; dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, config->cXon, config->cXoff); 
} else dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__); /* if we are implementing OUTBOUND XON/XOFF */ if (I_IXON(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X; dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n", __func__, config->cXon, config->cXoff); } else dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__); tty->termios.c_cflag &= ~CMSPAR; /* Round the baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... */ baud = 9600; } else tty_encode_baud_rate(tty, baud, baud); edge_port->baud_rate = baud; config->wBaudRate = (__u16)((461550L + baud/2) / baud); /* FIXME: Recompute actual baud from divisor here */ dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud, config->wBaudRate); dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate)); dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags); dev_dbg(dev, "bDataBits: %d\n", config->bDataBits); dev_dbg(dev, "bParity: %d\n", config->bParity); dev_dbg(dev, "bStopBits: %d\n", config->bStopBits); dev_dbg(dev, "cXon: %d\n", config->cXon); dev_dbg(dev, "cXoff: %d\n", config->cXoff); dev_dbg(dev, "bUartMode: %d\n", config->bUartMode); /* move the word values into big endian mode */ cpu_to_be16s(&config->wFlags); cpu_to_be16s(&config->wBaudRate); status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG, (__u8)(UMPM_UART1_PORT + port_number), 0, (__u8 *)config, sizeof(*config)); if (status) dev_dbg(dev, "%s - error %d when trying to write config to device\n", __func__, status); kfree(config); } static void edge_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int cflag; cflag = tty->termios.c_cflag; dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__, tty->termios.c_cflag, tty->termios.c_iflag); dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__, 
old_termios->c_cflag, old_termios->c_iflag); if (edge_port == NULL) return; /* change the port settings to the new ones specified */ change_port_settings(tty, edge_port, old_termios); } static int edge_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int mcr; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); mcr = edge_port->shadow_mcr; if (set & TIOCM_RTS) mcr |= MCR_RTS; if (set & TIOCM_DTR) mcr |= MCR_DTR; if (set & TIOCM_LOOP) mcr |= MCR_LOOPBACK; if (clear & TIOCM_RTS) mcr &= ~MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~MCR_LOOPBACK; edge_port->shadow_mcr = mcr; spin_unlock_irqrestore(&edge_port->ep_lock, flags); restore_mcr(edge_port, mcr); return 0; } static int edge_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int result = 0; unsigned int msr; unsigned int mcr; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); msr = edge_port->shadow_msr; mcr = edge_port->shadow_mcr; result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */ | ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */ | ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */ | ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */ | ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */ | ((msr & EDGEPORT_MSR_DSR) ? 
TIOCM_DSR: 0); /* 0x100 */ dev_dbg(&port->dev, "%s -- %x\n", __func__, result); spin_unlock_irqrestore(&edge_port->ep_lock, flags); return result; } static int get_serial_info(struct edgeport_port *edge_port, struct serial_struct __user *retinfo) { struct serial_struct tmp; unsigned cwait; cwait = edge_port->port->port.closing_wait; if (cwait != ASYNC_CLOSING_WAIT_NONE) cwait = jiffies_to_msecs(cwait) / 10; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; tmp.line = edge_port->port->minor; tmp.port = edge_port->port->port_number; tmp.irq = 0; tmp.xmit_fifo_size = edge_port->port->bulk_out_size; tmp.baud_base = 9600; tmp.close_delay = 5*HZ; tmp.closing_wait = cwait; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } static int edge_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); switch (cmd) { case TIOCGSERIAL: dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__); return get_serial_info(edge_port, (struct serial_struct __user *) arg); } return -ENOIOCTLCMD; } static void edge_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; int bv = 0; /* Off */ if (break_state == -1) bv = 1; /* On */ status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv); if (status) dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n", __func__, status); } static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial) { if (!edge_serial->use_heartbeat) return; schedule_delayed_work(&edge_serial->heartbeat_work, FW_HEARTBEAT_SECS * HZ); } static void edge_heartbeat_work(struct work_struct *work) { struct edgeport_serial *serial; struct ti_i2c_desc *rom_desc; serial = container_of(work, struct edgeport_serial, heartbeat_work.work); rom_desc = kmalloc(sizeof(*rom_desc), 
GFP_KERNEL); /* Descriptor address request is enough to reset the firmware timer */ if (!rom_desc || !get_descriptor_addr(serial, I2C_DESC_TYPE_ION, rom_desc)) { dev_err(&serial->serial->interface->dev, "%s - Incomplete heartbeat\n", __func__); } kfree(rom_desc); edge_heartbeat_schedule(serial); } static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; int status; u16 product_id; /* Make sure we have the required endpoints when in download mode. */ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { if (serial->num_bulk_in < serial->num_ports || serial->num_bulk_out < serial->num_ports) return -ENODEV; } /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (!edge_serial) return -ENOMEM; mutex_init(&edge_serial->es_lock); edge_serial->serial = serial; INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work); usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); if (status < 0) { kfree(edge_serial); return status; } if (status > 0) return 1; /* bind but do not register any ports */ product_id = le16_to_cpu( edge_serial->serial->dev->descriptor.idProduct); /* Currently only the EP/416 models require heartbeat support */ if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) { if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 || product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) { edge_serial->use_heartbeat = true; } } edge_heartbeat_schedule(edge_serial); return 0; } static void edge_disconnect(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); cancel_delayed_work_sync(&edge_serial->heartbeat_work); } static void edge_release(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); cancel_delayed_work_sync(&edge_serial->heartbeat_work); kfree(edge_serial); } static int edge_port_probe(struct usb_serial_port *port) { struct edgeport_port *edge_port; 
int ret; edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL); if (!edge_port) return -ENOMEM; spin_lock_init(&edge_port->ep_lock); edge_port->port = port; edge_port->edge_serial = usb_get_serial_data(port->serial); edge_port->bUartMode = default_uart_mode; switch (port->port_number) { case 0: edge_port->uart_base = UMPMEM_BASE_UART1; edge_port->dma_address = UMPD_OEDB1_ADDRESS; break; case 1: edge_port->uart_base = UMPMEM_BASE_UART2; edge_port->dma_address = UMPD_OEDB2_ADDRESS; break; default: dev_err(&port->dev, "unknown port number\n"); ret = -ENODEV; goto err; } dev_dbg(&port->dev, "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n", __func__, port->port_number, edge_port->uart_base, edge_port->dma_address); usb_set_serial_port_data(port, edge_port); ret = edge_create_sysfs_attrs(port); if (ret) goto err; port->port.closing_wait = msecs_to_jiffies(closing_wait * 10); port->port.drain_delay = 1; return 0; err: kfree(edge_port); return ret; } static int edge_port_remove(struct usb_serial_port *port) { struct edgeport_port *edge_port; edge_port = usb_get_serial_port_data(port); edge_remove_sysfs_attrs(port); kfree(edge_port); return 0; } /* Sysfs Attributes */ static ssize_t uart_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port = usb_get_serial_port_data(port); return sprintf(buf, "%d\n", edge_port->bUartMode); } static ssize_t uart_mode_store(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int v = simple_strtoul(valbuf, NULL, 0); dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v); if (v < 256) edge_port->bUartMode = v; else dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v); return count; } static DEVICE_ATTR_RW(uart_mode); static int 
edge_create_sysfs_attrs(struct usb_serial_port *port) { return device_create_file(&port->dev, &dev_attr_uart_mode); } static int edge_remove_sysfs_attrs(struct usb_serial_port *port) { device_remove_file(&port->dev, &dev_attr_uart_mode); return 0; } #ifdef CONFIG_PM static int edge_suspend(struct usb_serial *serial, pm_message_t message) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); cancel_delayed_work_sync(&edge_serial->heartbeat_work); return 0; } static int edge_resume(struct usb_serial *serial) { struct edgeport_serial *edge_serial = usb_get_serial_data(serial); edge_heartbeat_schedule(edge_serial); return 0; } #endif static struct usb_serial_driver edgeport_1port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_1", }, .description = "Edgeport TI 1 port adapter", .id_table = edgeport_1port_id_table, .num_ports = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, #ifdef CONFIG_PM .suspend = edge_suspend, .resume = edge_resume, #endif }; static struct usb_serial_driver edgeport_2port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_2", }, .description = "Edgeport TI 2 port adapter", .id_table = edgeport_2port_id_table, .num_ports = 2, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = 
edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_port_probe, .port_remove = edge_port_remove, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, #ifdef CONFIG_PM .suspend = edge_suspend, .resume = edge_resume, #endif }; static struct usb_serial_driver * const serial_drivers[] = { &edgeport_1port_device, &edgeport_2port_device, NULL }; module_usb_serial_driver(serial_drivers, id_table_combined); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("edgeport/down3.bin"); module_param(closing_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs"); module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_cpu_rev, "Ignore the cpu revision when connecting to a device"); module_param(default_uart_mode, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
./CrossVul/dataset_final_sorted/CWE-191/c/good_3345_0
crossvul-cpp_data_good_247_0
/**
 * @file
 * IMAP helper functions
 *
 * @authors
 * Copyright (C) 1996-1998,2010,2012-2013 Michael R. Elkins <me@mutt.org>
 * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net>
 * Copyright (C) 1999-2009,2012 Brendan Cully <brendan@kublai.com>
 *
 * @copyright
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @page imap_util IMAP helper functions
 *
 * IMAP helper functions
 */

#include "config.h"
#include <ctype.h>
#include <errno.h>
#include <netdb.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include "imap_private.h"
#include "mutt/mutt.h"
#include "conn/conn.h"
#include "bcache.h"
#include "context.h"
#include "globals.h"
#include "header.h"
#include "imap/imap.h"
#include "mailbox.h"
#include "message.h"
#include "mutt_account.h"
#include "mutt_socket.h"
#include "mx.h"
#include "options.h"
#include "protos.h"
#include "url.h"
#ifdef USE_HCACHE
#include "hcache/hcache.h"
#endif

/**
 * imap_expand_path - Canonicalise an IMAP path
 * @param path Buffer containing path
 * @param len  Buffer length
 * @retval  0 Success
 * @retval -1 Error
 *
 * IMAP implementation of mutt_expand_path. Rewrite an IMAP path in canonical
 * and absolute form.  The buffer is rewritten in place with the canonical IMAP
 * path.
 *
 * Function can fail if imap_parse_path() or url_tostring() fail,
 * or if the buffer isn't large enough.
 */
int imap_expand_path(char *path, size_t len)
{
  struct ImapMbox mx;
  struct ImapData *idata = NULL;
  struct Url url;
  char fixedpath[LONG_STRING];
  int rc;

  if (imap_parse_path(path, &mx) < 0)
    return -1;

  /* NONEW: reuse an existing connection if there is one; may be NULL */
  idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW);
  mutt_account_tourl(&mx.account, &url);
  imap_fix_path(idata, mx.mbox, fixedpath, sizeof(fixedpath));
  url.path = fixedpath;

  rc = url_tostring(&url, path, len, U_DECODE_PASSWD);
  FREE(&mx.mbox);

  return rc;
}

/**
 * imap_get_parent - Get an IMAP folder's parent
 * @param output Buffer for the result
 * @param mbox   Mailbox whose parent is to be determined
 * @param olen   Length of the buffer
 * @param delim  Path delimiter
 */
void imap_get_parent(char *output, const char *mbox, size_t olen, char delim)
{
  int n;

  /* Make a copy of the mailbox name, but only if the pointers are different */
  if (mbox != output)
    mutt_str_strfcpy(output, mbox, olen);

  n = mutt_str_strlen(output);

  /* Let's go backwards until the next delimiter
   *
   * If output[n] is a '/', the first n-- will allow us
   * to ignore it. If it isn't, then output looks like
   * "/aaaaa/bbbb". There is at least one "b", so we can't skip
   * the "/" after the 'a's.
   *
   * If output == '/', then n-- => n == 0, so the loop ends
   * immediately
   */
  for (n--; n >= 0 && output[n] != delim; n--)
    ;

  /* We stopped before the beginning. There is a trailing
   * slash.
   */
  if (n > 0)
  {
    /* Strip the trailing delimiter.  */
    output[n] = '\0';
  }
  else
  {
    output[0] = (n == 0) ? delim : '\0';
  }
}

/**
 * imap_get_parent_path - Get the path of the parent folder
 * @param output Buffer for the result
 * @param path   Mailbox whose parent is to be determined
 * @param olen   Length of the buffer
 *
 * Provided an imap path, returns in output the parent directory if
 * existent. Else returns the same path.
 */
void imap_get_parent_path(char *output, const char *path, size_t olen)
{
  struct ImapMbox mx;
  struct ImapData *idata = NULL;
  char mbox[LONG_STRING] = "";

  if (imap_parse_path(path, &mx) < 0)
  {
    mutt_str_strfcpy(output, path, olen);
    return;
  }

  idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW);
  if (!idata)
  {
    mutt_str_strfcpy(output, path, olen);
    return;
  }

  /* Stores a fixed path in mbox */
  imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox));

  /* Gets the parent mbox in mbox */
  imap_get_parent(mbox, mbox, sizeof(mbox), idata->delim);

  /* Returns a fully qualified IMAP url */
  imap_qualify_path(output, olen, &mx, mbox);
  FREE(&mx.mbox);
}

/**
 * imap_clean_path - Cleans an IMAP path using imap_fix_path
 * @param path Path to be cleaned
 * @param plen Length of the buffer
 *
 * Does it in place.
 */
void imap_clean_path(char *path, size_t plen)
{
  struct ImapMbox mx;
  struct ImapData *idata = NULL;
  char mbox[LONG_STRING] = "";

  if (imap_parse_path(path, &mx) < 0)
    return;

  idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW);
  if (!idata)
    return;

  /* Stores a fixed path in mbox */
  imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox));

  /* Returns a fully qualified IMAP url */
  imap_qualify_path(path, plen, &mx, mbox);
}

#ifdef USE_HCACHE
/**
 * imap_hcache_namer - Generate a filename for the header cache
 * @param path Path for the header cache file
 * @param dest Buffer for result
 * @param dlen Length of buffer
 * @retval num Chars written to dest
 */
static int imap_hcache_namer(const char *path, char *dest, size_t dlen)
{
  return snprintf(dest, dlen, "%s.hcache", path);
}

/**
 * imap_hcache_open - Open a header cache
 * @param idata Server data
 * @param path  Path to the header cache
 * @retval ptr  HeaderCache
 * @retval NULL Failure
 *
 * Rejects mailbox names containing "..", ".."-prefixed or "/.."-suffixed
 * components so a malicious server-supplied name cannot escape the cache
 * directory (path traversal).
 */
header_cache_t *imap_hcache_open(struct ImapData *idata, const char *path)
{
  struct ImapMbox mx;
  struct Url url;
  char cachepath[PATH_MAX];
  char mbox[PATH_MAX];

  if (path)
    imap_cachepath(idata, path, mbox, sizeof(mbox));
  else
  {
    if (!idata->ctx || imap_parse_path(idata->ctx->path, &mx) < 0)
      return NULL;

    imap_cachepath(idata, mx.mbox, mbox, sizeof(mbox));
    FREE(&mx.mbox);
  }

  if (strstr(mbox, "/../") || (strcmp(mbox, "..") == 0) ||
      (strncmp(mbox, "../", 3) == 0))
    return NULL;
  size_t len = strlen(mbox);
  if ((len > 3) && (strcmp(mbox + len - 3, "/..") == 0))
    return NULL;

  mutt_account_tourl(&idata->conn->account, &url);
  url.path = mbox;
  url_tostring(&url, cachepath, sizeof(cachepath), U_PATH);

  return mutt_hcache_open(HeaderCache, cachepath, imap_hcache_namer);
}

/**
 * imap_hcache_close - Close the header cache
 * @param idata Server data
 */
void imap_hcache_close(struct ImapData *idata)
{
  if (!idata->hcache)
    return;

  mutt_hcache_close(idata->hcache);
  idata->hcache = NULL;
}

/**
 * imap_hcache_get - Get a header cache entry by its UID
 * @param idata Server data
 * @param uid   UID to find
 * @retval ptr  Email Header
 * @retval NULL Failure
 */
struct Header *imap_hcache_get(struct ImapData *idata, unsigned int uid)
{
  char key[16]; /* "/" + up to 10 digits of a 32-bit UID + NUL fits */
  void *uv = NULL;
  struct Header *h = NULL;

  if (!idata->hcache)
    return NULL;

  sprintf(key, "/%u", uid);
  uv = mutt_hcache_fetch(idata->hcache, key, imap_hcache_keylen(key));
  if (uv)
  {
    /* cached entries are prefixed with the uidvalidity they were stored under */
    if (*(unsigned int *) uv == idata->uid_validity)
      h = mutt_hcache_restore(uv);
    else
      mutt_debug(3, "hcache uidvalidity mismatch: %u\n", *(unsigned int *) uv);
    mutt_hcache_free(idata->hcache, &uv);
  }

  return h;
}

/**
 * imap_hcache_put - Add an entry to the header cache
 * @param idata Server data
 * @param h     Email Header
 * @retval  0 Success
 * @retval -1 Failure
 */
int imap_hcache_put(struct ImapData *idata, struct Header *h)
{
  char key[16];

  if (!idata->hcache)
    return -1;

  sprintf(key, "/%u", HEADER_DATA(h)->uid);
  return mutt_hcache_store(idata->hcache, key, imap_hcache_keylen(key), h,
                           idata->uid_validity);
}

/**
 * imap_hcache_del - Delete an item from the header cache
 * @param idata Server data
 * @param uid   UID of entry to delete
 * @retval  0 Success
 * @retval -1 Failure
 */
int imap_hcache_del(struct ImapData *idata, unsigned int uid)
{
  char key[16];

  if (!idata->hcache)
    return -1;

  sprintf(key, "/%u", uid);
  return mutt_hcache_delete(idata->hcache, key, imap_hcache_keylen(key));
}
#endif

/**
 * imap_parse_path - Parse an IMAP mailbox name into name,host,port
 * @param path Mailbox path to parse
 * @param mx   An IMAP mailbox
 * @retval  0 Success
 * @retval -1 Failure
 *
 * Given an IMAP mailbox name, return host, port and a path IMAP servers will
 * recognize.  mx.mbox is malloc'd, caller must free it
 */
int imap_parse_path(const char *path, struct ImapMbox *mx)
{
  static unsigned short ImapPort = 0;
  static unsigned short ImapsPort = 0;
  struct servent *service = NULL;
  struct Url url;
  char *c = NULL;

  /* look up imap/imaps ports once and cache them for later calls */
  if (!ImapPort)
  {
    service = getservbyname("imap", "tcp");
    if (service)
      ImapPort = ntohs(service->s_port);
    else
      ImapPort = IMAP_PORT;
    mutt_debug(3, "Using default IMAP port %d\n", ImapPort);
  }
  if (!ImapsPort)
  {
    service = getservbyname("imaps", "tcp");
    if (service)
      ImapsPort = ntohs(service->s_port);
    else
      ImapsPort = IMAP_SSL_PORT;
    mutt_debug(3, "Using default IMAPS port %d\n", ImapsPort);
  }

  /* Defaults */
  memset(&mx->account, 0, sizeof(mx->account));
  mx->account.port = ImapPort;
  mx->account.type = MUTT_ACCT_TYPE_IMAP;

  c = mutt_str_strdup(path);
  url_parse(&url, c);
  if (url.scheme == U_IMAP || url.scheme == U_IMAPS)
  {
    if (mutt_account_fromurl(&mx->account, &url) < 0 || !*mx->account.host)
    {
      url_free(&url);
      FREE(&c);
      return -1;
    }

    mx->mbox = mutt_str_strdup(url.path);

    if (url.scheme == U_IMAPS)
      mx->account.flags |= MUTT_ACCT_SSL;

    url_free(&url);
    FREE(&c);
  }
  /* old PINE-compatibility code: "{host[:port][/ssl]}mailbox" syntax */
  else
  {
    url_free(&url);
    FREE(&c);
    char tmp[128];
    if (sscanf(path, "{%127[^}]}", tmp) != 1)
      return -1;

    c = strchr(path, '}');
    if (!c)
      return -1;
    else
    {
      /* walk past closing '}' */
      mx->mbox = mutt_str_strdup(c + 1);
    }

    c = strrchr(tmp, '@');
    if (c)
    {
      *c = '\0';
      mutt_str_strfcpy(mx->account.user, tmp, sizeof(mx->account.user));
      mutt_str_strfcpy(tmp, c + 1, sizeof(tmp));
      mx->account.flags |= MUTT_ACCT_USER;
    }

    const int n = sscanf(tmp, "%127[^:/]%127s", mx->account.host, tmp);
    if (n < 1)
    {
      mutt_debug(1, "NULL host in %s\n", path);
      FREE(&mx->mbox);
      return -1;
    }

    if (n > 1)
    {
      if (sscanf(tmp, ":%hu%127s", &(mx->account.port), tmp) >= 1)
        mx->account.flags |= MUTT_ACCT_PORT;
      if (sscanf(tmp, "/%s", tmp) == 1)
      {
        if (mutt_str_strncmp(tmp, "ssl", 3) == 0)
          mx->account.flags |= MUTT_ACCT_SSL;
        else
        {
          mutt_debug(1, "Unknown connection type in %s\n", path);
          FREE(&mx->mbox);
          return -1;
        }
      }
    }
  }

  if ((mx->account.flags & MUTT_ACCT_SSL) && !(mx->account.flags & MUTT_ACCT_PORT))
    mx->account.port = ImapsPort;

  return 0;
}

/**
 * imap_mxcmp - Compare mailbox names, giving priority to INBOX
 * @param mx1 First mailbox name
 * @param mx2 Second mailbox name
 * @retval <0 First mailbox precedes Second mailbox
 * @retval  0 Mailboxes are the same
 * @retval >0 Second mailbox precedes First mailbox
 *
 * Like a normal sort function except that "INBOX" will be sorted to the
 * beginning of the list.
 */
int imap_mxcmp(const char *mx1, const char *mx2)
{
  char *b1 = NULL;
  char *b2 = NULL;
  int rc;

  if (!mx1 || !*mx1)
    mx1 = "INBOX";
  if (!mx2 || !*mx2)
    mx2 = "INBOX";
  if ((mutt_str_strcasecmp(mx1, "INBOX") == 0) &&
      (mutt_str_strcasecmp(mx2, "INBOX") == 0))
  {
    return 0;
  }

  b1 = mutt_mem_malloc(strlen(mx1) + 1);
  b2 = mutt_mem_malloc(strlen(mx2) + 1);

  /* normalise delimiters before comparing */
  imap_fix_path(NULL, mx1, b1, strlen(mx1) + 1);
  imap_fix_path(NULL, mx2, b2, strlen(mx2) + 1);

  rc = mutt_str_strcmp(b1, b2);
  FREE(&b1);
  FREE(&b2);

  return rc;
}

/**
 * imap_pretty_mailbox - Prettify an IMAP mailbox name
 * @param path Mailbox name to be tidied
 *
 * Called by mutt_pretty_mailbox() to make IMAP paths look nice.
 */
void imap_pretty_mailbox(char *path)
{
  struct ImapMbox home, target;
  struct Url url;
  char *delim = NULL;
  int tlen;
  int hlen = 0;
  bool home_match = false;

  if (imap_parse_path(path, &target) < 0)
    return;

  tlen = mutt_str_strlen(target.mbox);
  /* check whether we can do '=' substitution */
  if (mx_is_imap(Folder) && !imap_parse_path(Folder, &home))
  {
    hlen = mutt_str_strlen(home.mbox);
    if (tlen && mutt_account_match(&home.account, &target.account) &&
        (mutt_str_strncmp(home.mbox, target.mbox, hlen) == 0))
    {
      if (hlen == 0)
        home_match = true;
      else if (ImapDelimChars)
      {
        for (delim = ImapDelimChars; *delim != '\0'; delim++)
          if (target.mbox[hlen] == *delim)
            home_match = true;
      }
    }
    FREE(&home.mbox);
  }

  /* do the '=' substitution */
  if (home_match)
  {
    *path++ = '=';
    /* copy remaining path, skipping delimiter */
    if (hlen == 0)
      hlen = -1;
    memcpy(path, target.mbox + hlen + 1, tlen - hlen - 1);
    path[tlen - hlen - 1] = '\0';
  }
  else
  {
    mutt_account_tourl(&target.account, &url);
    url.path = target.mbox;
    /* FIXME: That hard-coded constant is bogus. But we need the actual
     * size of the buffer from mutt_pretty_mailbox. And these pretty
     * operations usually shrink the result. Still...
     */
    url_tostring(&url, path, 1024, 0);
  }

  FREE(&target.mbox);
}

/**
 * imap_continue - display a message and ask the user if they want to go on
 * @param msg  Location of the error
 * @param resp Message for user
 * @retval num Result: #MUTT_YES, #MUTT_NO, #MUTT_ABORT
 */
int imap_continue(const char *msg, const char *resp)
{
  imap_error(msg, resp);
  return mutt_yesorno(_("Continue?"), 0);
}

/**
 * imap_error - show an error and abort
 * @param where Location of the error
 * @param msg   Message for user
 */
void imap_error(const char *where, const char *msg)
{
  mutt_error("%s [%s]\n", where, msg);
}

/**
 * imap_new_idata - Allocate and initialise a new ImapData structure
 * @retval NULL Failure (no mem)
 * @retval ptr  New ImapData
 */
struct ImapData *imap_new_idata(void)
{
  struct ImapData *idata = mutt_mem_calloc(1, sizeof(struct ImapData));

  idata->cmdbuf = mutt_buffer_new();

  idata->cmdslots = ImapPipelineDepth + 2;
  idata->cmds = mutt_mem_calloc(idata->cmdslots, sizeof(*idata->cmds));

  STAILQ_INIT(&idata->flags);
  STAILQ_INIT(&idata->mboxcache);

  return idata;
}

/**
 * imap_free_idata - Release and clear storage in an ImapData structure
 * @param idata Server data
 */
void imap_free_idata(struct ImapData **idata)
{
  if (!idata)
    return;

  FREE(&(*idata)->capstr);
  mutt_list_free(&(*idata)->flags);
  imap_mboxcache_free(*idata);
  mutt_buffer_free(&(*idata)->cmdbuf);
  FREE(&(*idata)->buf);
  mutt_bcache_close(&(*idata)->bcache);
  FREE(&(*idata)->cmds);
  FREE(idata);
}

/**
 * imap_fix_path - Fix up the imap path
 * @param idata   Server data
 * @param mailbox Mailbox path
 * @param path    Buffer for the result
 * @param plen    Length of buffer
 * @retval ptr Fixed-up path
 *
 * This is necessary because the rest of neomutt assumes a hierarchy delimiter
 * of '/', which is not necessarily true in IMAP.  Additionally, the filesystem
 * converts multiple hierarchy delimiters into a single one, ie "///" is equal
 * to "/".  IMAP servers are not required to do this.
 * Moreover, IMAP servers may dislike the path ending with the delimiter.
 */
char *imap_fix_path(struct ImapData *idata, const char *mailbox, char *path, size_t plen)
{
  int i = 0;
  char delim = '\0';

  if (idata)
    delim = idata->delim;

  while (mailbox && *mailbox && i < plen - 1)
  {
    if ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim))
    {
      /* use connection delimiter if known. Otherwise use user delimiter */
      if (!idata)
        delim = *mailbox;

      /* collapse runs of delimiters into one */
      while (*mailbox && ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) ||
                          (delim && *mailbox == delim)))
      {
        mailbox++;
      }
      path[i] = delim;
    }
    else
    {
      path[i] = *mailbox;
      mailbox++;
    }
    i++;
  }
  /* strip a trailing delimiter, if any */
  if (i && path[--i] != delim)
    i++;
  path[i] = '\0';

  return path;
}

/**
 * imap_cachepath - Generate a cache path for a mailbox
 * @param idata   Server data
 * @param mailbox Mailbox name
 * @param dest    Buffer to store cache path
 * @param dlen    Length of buffer
 */
void imap_cachepath(struct ImapData *idata, const char *mailbox, char *dest, size_t dlen)
{
  char *s = NULL;
  const char *p = mailbox;

  for (s = dest; p && *p && dlen; dlen--)
  {
    if (*p == idata->delim)
    {
      *s = '/';
      /* simple way to avoid collisions with UIDs */
      if (*(p + 1) >= '0' && *(p + 1) <= '9')
      {
        if (--dlen)
          *++s = '_';
      }
    }
    else
      *s = *p;
    p++;
    s++;
  }
  *s = '\0';
}

/**
 * imap_get_literal_count - write number of bytes in an IMAP literal into bytes
 * @param[in]  buf   Number as a string
 * @param[out] bytes Resulting number
 * @retval  0 Success
 * @retval -1 Failure
 */
int imap_get_literal_count(const char *buf, unsigned int *bytes)
{
  char *pc = NULL;
  char *pn = NULL;

  if (!buf || !(pc = strchr(buf, '{')))
    return -1;
  pc++;
  pn = pc;
  while (isdigit((unsigned char) *pc))
    pc++;
  /* NOTE: terminates the digit run in place -- caller's buffer is modified */
  *pc = '\0';
  if (mutt_str_atoui(pn, bytes) < 0)
    return -1;

  return 0;
}

/**
 * imap_get_qualifier - Get the qualifier from a tagged response
 * @param buf Command string to process
 * @retval ptr Start of the qualifier
 *
 * In a tagged response, skip tag and status for the qualifier message.
 * Used by imap_copy_message for TRYCREATE
 */
char *imap_get_qualifier(char *buf)
{
  char *s = buf;

  /* skip tag */
  s = imap_next_word(s);
  /* skip OK/NO/BAD response */
  s = imap_next_word(s);

  return s;
}

/**
 * imap_next_word - Find where the next IMAP word begins
 * @param s Command string to process
 * @retval ptr Next IMAP word
 *
 * Honours backslash escapes and double-quoted strings when scanning for the
 * next unquoted whitespace.
 */
char *imap_next_word(char *s)
{
  int quoted = 0;

  while (*s)
  {
    if (*s == '\\')
    {
      s++;
      if (*s)
        s++;
      continue;
    }
    if (*s == '\"')
      quoted = quoted ? 0 : 1;
    if (!quoted && ISSPACE(*s))
      break;
    s++;
  }

  SKIPWS(s);
  return s;
}

/**
 * imap_qualify_path - Make an absolute IMAP folder target
 * @param dest Buffer for the result
 * @param len  Length of buffer
 * @param mx   Imap mailbox
 * @param path Path relative to the mailbox
 *
 * given ImapMbox and relative path.
 */
void imap_qualify_path(char *dest, size_t len, struct ImapMbox *mx, char *path)
{
  struct Url url;

  mutt_account_tourl(&mx->account, &url);
  url.path = path;

  url_tostring(&url, dest, len, 0);
}

/**
 * imap_quote_string - quote string according to IMAP rules
 * @param dest Buffer for the result
 * @param dlen Length of the buffer
 * @param src  String to be quoted
 * @param quote_backtick If true, also backslash-escape backtick characters
 *
 * Surround string with quotes, escape " and \ with backslash
 */
void imap_quote_string(char *dest, size_t dlen, const char *src, bool quote_backtick)
{
  const char *quote = "`\"\\";
  if (!quote_backtick)
    quote++; /* skip the leading '`' so backticks are copied verbatim */

  char *pt = dest;
  const char *s = src;

  *pt++ = '"';
  /* save room for trailing quote-char */
  dlen -= 2;

  for (; *s && dlen; s++)
  {
    if (strchr(quote, *s))
    {
      if (dlen < 2)
        break;
      dlen -= 2;
      *pt++ = '\\';
      *pt++ = *s;
    }
    else
    {
      *pt++ = *s;
      dlen--;
    }
  }
  *pt++ = '"';
  *pt = '\0';
}

/**
 * imap_unquote_string - equally stupid unquoting routine
 * @param s String to be unquoted
 *
 * Strips surrounding double quotes and backslash escapes, in place.
 */
void imap_unquote_string(char *s)
{
  char *d = s;

  if (*s == '\"')
    s++;
  else
    return;

  while (*s)
  {
    if (*s == '\"')
    {
      *d = '\0';
      return;
    }
    if (*s == '\\')
    {
      s++;
    }
    if (*s)
    {
      *d = *s;
      d++;
      s++;
    }
  }
  *d = '\0';
}

/**
 * imap_munge_mbox_name - Quote awkward characters in a mailbox name
 * @param idata Server data
 * @param dest  Buffer to store safe mailbox name
 * @param dlen  Length of buffer
 * @param src   Mailbox name
 */
void imap_munge_mbox_name(struct ImapData *idata, char *dest, size_t dlen, const char *src)
{
  /* UTF-7/8 encode first, then IMAP-quote the result */
  char *buf = mutt_str_strdup(src);
  imap_utf_encode(idata, &buf);

  imap_quote_string(dest, dlen, buf, false);

  FREE(&buf);
}

/**
 * imap_unmunge_mbox_name - Remove quoting from a mailbox name
 * @param idata Server data
 * @param s     Mailbox name
 *
 * The string will be altered in-place.
 */
void imap_unmunge_mbox_name(struct ImapData *idata, char *s)
{
  imap_unquote_string(s);

  char *buf = mutt_str_strdup(s);
  if (buf)
  {
    imap_utf_decode(idata, &buf);
    /* decoded result can only shrink or stay the same length here,
     * the strlen(s) bound keeps the copy inside the original buffer */
    strncpy(s, buf, strlen(s));
  }

  FREE(&buf);
}

/**
 * imap_keepalive - poll the current folder to keep the connection alive
 *
 * Walks all open connections and issues a NOOP-style check on any
 * authenticated IMAP connection that has been idle past ImapKeepalive.
 */
void imap_keepalive(void)
{
  struct Connection *conn = NULL;
  struct ImapData *idata = NULL;
  time_t now = time(NULL);

  TAILQ_FOREACH(conn, mutt_socket_head(), entries)
  {
    if (conn->account.type == MUTT_ACCT_TYPE_IMAP)
    {
      idata = conn->data;
      if (idata->state >= IMAP_AUTHENTICATED && now >= idata->lastread + ImapKeepalive)
      {
        imap_check(idata, 1);
      }
    }
  }
}

/**
 * imap_wait_keepalive - Wait for a process to change state
 * @param pid Process ID to listen to
 * @retval num 'wstatus' from waitpid()
 *
 * Blocks in waitpid() while keeping IMAP connections alive: SIGALRM
 * interrupts the wait every ImapKeepalive seconds so imap_keepalive()
 * can run, then the wait is resumed.  Signal mask and SIGALRM handler
 * are saved and restored around the loop.
 */
int imap_wait_keepalive(pid_t pid)
{
  struct sigaction oldalrm;
  struct sigaction act;
  sigset_t oldmask;
  int rc;

  /* force passive mode so the keepalive checks never open new connections */
  bool imap_passive = ImapPassive;

  ImapPassive = true;
  OptKeepQuiet = true;

  sigprocmask(SIG_SETMASK, NULL, &oldmask);

  sigemptyset(&act.sa_mask);
  act.sa_handler = mutt_sig_empty_handler;
#ifdef SA_INTERRUPT
  act.sa_flags = SA_INTERRUPT; /* ensure waitpid is interrupted, not restarted */
#else
  act.sa_flags = 0;
#endif

  sigaction(SIGALRM, &act, &oldalrm);

  alarm(ImapKeepalive);
  while (waitpid(pid, &rc, 0) < 0 && errno == EINTR)
  {
    alarm(0); /* cancel a possibly pending alarm */
    imap_keepalive();
    alarm(ImapKeepalive);
  }

  alarm(0); /* cancel a possibly pending alarm */
  sigaction(SIGALRM, &oldalrm, NULL);
  sigprocmask(SIG_SETMASK, &oldmask, NULL);

  OptKeepQuiet = false;
  if (!imap_passive)
    ImapPassive = false;

  return rc;
}

/**
 * imap_allow_reopen - Allow re-opening a folder upon expunge
 * @param ctx Context
 */
void imap_allow_reopen(struct Context *ctx)
{
  struct ImapData *idata = NULL;
  if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP)
    return;

  idata = ctx->data;
  /* only the context currently selected on this connection may be reopened */
  if (idata->ctx == ctx)
    idata->reopen |= IMAP_REOPEN_ALLOW;
}

/**
 * imap_disallow_reopen - Disallow re-opening a folder upon expunge
 * @param ctx Context
 */
void imap_disallow_reopen(struct Context *ctx)
{
  struct ImapData *idata = NULL;
  if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP)
    return;

  idata = ctx->data;
  if (idata->ctx == ctx)
    idata->reopen &= ~IMAP_REOPEN_ALLOW;
}

/**
 * imap_account_match - Compare two Accounts
 * @param a1 First Account
 * @param a2 Second Account
 * @retval true Accounts match
 *
 * Compares through the canonical (connection) account when one exists,
 * so aliases of the same server compare equal.
 */
int imap_account_match(const struct Account *a1, const struct Account *a2)
{
  struct ImapData *a1_idata = imap_conn_find(a1, MUTT_IMAP_CONN_NONEW);
  struct ImapData *a2_idata = imap_conn_find(a2, MUTT_IMAP_CONN_NONEW);
  const struct Account *a1_canon = a1_idata == NULL ? a1 : &a1_idata->conn->account;
  const struct Account *a2_canon = a2_idata == NULL ? a2 : &a2_idata->conn->account;

  return mutt_account_match(a1_canon, a2_canon);
}
./CrossVul/dataset_final_sorted/CWE-191/c/good_247_0
crossvul-cpp_data_bad_2395_0
/*****************************************************************************
 * libmp4.c : LibMP4 library for mp4 module for vlc
 *****************************************************************************
 * Copyright (C) 2001-2004, 2010 VLC authors and VideoLAN
 *
 * Author: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_stream.h>                               /* stream_Peek*/

#ifdef HAVE_ZLIB_H
# include <zlib.h>                                  /* for compressed moov */
#endif

#include "libmp4.h"
#include "languages.h"
#include <math.h>

/* Some assumptions:
 * The input method HAS to be seekable
 */

/* convert 16.16 fixed point to floating point */
static double conv_fx( int32_t fx )
{
    double fp = fx;

    fp /= 65536.;
    return fp;
}

/* some functions for mp4 encoding of variables */
#ifdef MP4_VERBOSE
/* Format a (possibly absolute) MP4 timestamp as "Nd-HHh:MMm:SSs".
 * Absolute MP4 dates count seconds since 1 Jan 1904; psz must be large
 * enough for the formatted result (callers use 128-byte buffers). */
static void MP4_ConvertDate2Str( char *psz, uint64_t i_date, bool b_relative )
{
    int i_day;
    int i_hour;
    int i_min;
    int i_sec;

    /* date begin at 1 jan 1904 */
    if ( !b_relative )
        i_date += ((INT64_C(1904) * 365) + 17) * 24 * 60 * 60;

    i_day = i_date / ( 60*60*24);
    i_hour = ( i_date /( 60*60 ) ) % 60;
    i_min = ( i_date / 60 ) % 60;
    i_sec = i_date % 60;
    sprintf( psz, "%dd-%2.2dh:%2.2dm:%2.2ds", i_day, i_hour, i_min, i_sec );
}
#endif

/*****************************************************************************
 * Some prototypes.
*****************************************************************************/ static MP4_Box_t *MP4_ReadBox( stream_t *p_stream, MP4_Box_t *p_father ); /***************************************************************************** * MP4_ReadBoxCommon : Load only common parameters for all boxes ***************************************************************************** * p_box need to be an already allocated MP4_Box_t, and all data * will only be peek not read * * RETURN : 0 if it fail, 1 otherwise *****************************************************************************/ int MP4_ReadBoxCommon( stream_t *p_stream, MP4_Box_t *p_box ) { int i_read; const uint8_t *p_peek; if( ( ( i_read = stream_Peek( p_stream, &p_peek, 32 ) ) < 8 ) ) { return 0; } p_box->i_pos = stream_Tell( p_stream ); p_box->data.p_payload = NULL; p_box->p_father = NULL; p_box->p_first = NULL; p_box->p_last = NULL; p_box->p_next = NULL; MP4_GET4BYTES( p_box->i_shortsize ); MP4_GETFOURCC( p_box->i_type ); /* Now special case */ if( p_box->i_shortsize == 1 ) { /* get the true size on 64 bits */ MP4_GET8BYTES( p_box->i_size ); } else { p_box->i_size = p_box->i_shortsize; /* XXX size of 0 means that the box extends to end of file */ } if( p_box->i_type == ATOM_uuid ) { /* get extented type on 16 bytes */ GetUUID( &p_box->i_uuid, p_peek ); p_peek += 16; i_read -= 16; } else { CreateUUID( &p_box->i_uuid, p_box->i_type ); } #ifdef MP4_ULTRA_VERBOSE if( p_box->i_size ) { if MP4_BOX_TYPE_ASCII() msg_Dbg( p_stream, "found Box: %4.4s size %"PRId64" %"PRId64, (char*)&p_box->i_type, p_box->i_size, p_box->i_pos ); else msg_Dbg( p_stream, "found Box: c%3.3s size %"PRId64, (char*)&p_box->i_type+1, p_box->i_size ); } #endif return 1; } /***************************************************************************** * MP4_NextBox : Go to the next box ***************************************************************************** * if p_box == NULL, go to the next box in which we are( at the begining ). 
 *****************************************************************************/
static int MP4_NextBox( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_t box;

    if( !p_box )
    {
        /* peek the header at the current position to learn our own bounds */
        if ( !MP4_ReadBoxCommon( p_stream, &box ) )
            return 0;
        p_box = &box;
    }

    if( !p_box->i_size )
    {
        return 2; /* Box with infinite size */
    }

    if( p_box->p_father )
    {
        /* if father's size == 0, it means unknown or infinite size,
         * and we skip the followong check */
        if( p_box->p_father->i_size > 0 )
        {
            const off_t i_box_end = p_box->i_size + p_box->i_pos;
            const off_t i_father_end = p_box->p_father->i_size + p_box->p_father->i_pos;

            /* check if it's within p-father */
            if( i_box_end >= i_father_end )
            {
                if( i_box_end > i_father_end )
                    msg_Dbg( p_stream, "out of bound child" );
                return 0; /* out of bound */
            }
        }
    }
    if( stream_Seek( p_stream, p_box->i_size + p_box->i_pos ) )
    {
        return 0;
    }

    return 1;
}

/*****************************************************************************
 * For all known box a loader is given,
 * XXX: all common struct have to be already read by MP4_ReadBoxCommon
 *      after called one of theses functions, file position is unknown
 *      you need to call MP4_GotoBox to go where you want
 *****************************************************************************/
static int MP4_ReadBoxContainerChildrenIndexed( stream_t *p_stream,
               MP4_Box_t *p_container, uint32_t i_last_child, bool b_indexed )
{
    MP4_Box_t *p_box;

    /* Size of root container is set to 0 when unknown, for exemple
     * with a DASH stream. In that case, we skip the following check */
    if( p_container->i_size
            && ( stream_Tell( p_stream ) + ((b_indexed)?16:8) >
        (off_t)(p_container->i_pos + p_container->i_size) ) )
    {
        /* there is no box to load */
        return 0;
    }

    do
    {
        uint32_t i_index = 0;
        if ( b_indexed )
        {
            /* indexed containers (mdta ilst) prefix each child with an
             * 8-byte header whose second dword is the child's index */
            uint8_t read[8];
            if ( stream_Read( p_stream, read, 8 ) < 8 )
                return 0;
            i_index = GetDWBE(&read[4]);
        }
        if( ( p_box = MP4_ReadBox( p_stream, p_container ) ) == NULL ) continue;
        p_box->i_index = i_index;

        /* chain this box with the father and the other at same level */
        if( !p_container->p_first ) p_container->p_first = p_box;
        else p_container->p_last->p_next = p_box;
        p_container->p_last = p_box;
        if( p_box->i_type == i_last_child )
        {
            MP4_NextBox( p_stream, p_box );
            break;
        }
    } while( MP4_NextBox( p_stream, p_box ) == 1 );

    return 1;
}

int MP4_ReadBoxContainerChildren( stream_t *p_stream, MP4_Box_t *p_container,
                                  uint32_t i_last_child )
{
    return MP4_ReadBoxContainerChildrenIndexed( p_stream, p_container,
                                                i_last_child, false );
}

static int MP4_ReadBoxContainerRaw( stream_t *p_stream, MP4_Box_t *p_container )
{
    return MP4_ReadBoxContainerChildren( p_stream, p_container, 0 );
}

static int MP4_ReadBoxContainer( stream_t *p_stream, MP4_Box_t *p_container )
{
    if( p_container->i_size
            && ( p_container->i_size <= (size_t)mp4_box_headersize(p_container ) + 8 ) )
    {
        /* container is empty, 8 stand for the first header in this box */
        return 1;
    }

    /* enter box */
    stream_Seek( p_stream, p_container->i_pos +
                 mp4_box_headersize( p_container ) );

    return MP4_ReadBoxContainerRaw( p_stream, p_container );
}

static void MP4_FreeBox_Common( MP4_Box_t *p_box )
{
    /* Up to now do nothing */
    (void)p_box;
}

static int MP4_ReadBoxSkip( stream_t *p_stream, MP4_Box_t *p_box )
{
    /* XXX sometime moov is hiden in a free box */
    if( p_box->p_father &&
        p_box->p_father->i_type == ATOM_root &&
        p_box->i_type == ATOM_free )
    {
        const uint8_t *p_peek;
        int i_read;
        vlc_fourcc_t i_fcc;

        i_read  = stream_Peek( p_stream, &p_peek, 44 );

        p_peek += mp4_box_headersize(
p_box ) + 4; i_read -= mp4_box_headersize( p_box ) + 4; if( i_read >= 8 ) { i_fcc = VLC_FOURCC( p_peek[0], p_peek[1], p_peek[2], p_peek[3] ); if( i_fcc == ATOM_cmov || i_fcc == ATOM_mvhd ) { msg_Warn( p_stream, "detected moov hidden in a free box ..." ); p_box->i_type = ATOM_foov; return MP4_ReadBoxContainer( p_stream, p_box ); } } } /* Nothing to do */ #ifdef MP4_ULTRA_VERBOSE if MP4_BOX_TYPE_ASCII() msg_Dbg( p_stream, "skip box: \"%4.4s\"", (char*)&p_box->i_type ); else msg_Dbg( p_stream, "skip box: \"c%3.3s\"", (char*)&p_box->i_type+1 ); #endif return 1; } static int MP4_ReadBox_ilst( stream_t *p_stream, MP4_Box_t *p_box ) { if( p_box->i_size < 8 || stream_Read( p_stream, NULL, 8 ) < 8 ) return 0; /* Find our handler */ if ( !p_box->i_handler && p_box->p_father ) { const MP4_Box_t *p_sibling = p_box->p_father->p_first; while( p_sibling ) { if ( p_sibling->i_type == ATOM_hdlr && p_sibling->data.p_hdlr ) { p_box->i_handler = p_sibling->data.p_hdlr->i_handler_type; break; } p_sibling = p_sibling->p_next; } } switch( p_box->i_handler ) { case 0: msg_Warn( p_stream, "no handler for ilst atom" ); return 0; case HANDLER_mdta: return MP4_ReadBoxContainerChildrenIndexed( p_stream, p_box, 0, true ); case HANDLER_mdir: return MP4_ReadBoxContainerChildren( p_stream, p_box, 0 ); default: msg_Warn( p_stream, "Unknown ilst handler type '%4.4s'", (char*)&p_box->i_handler ); return 0; } } static int MP4_ReadBox_ftyp( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ftyp_t ); MP4_GETFOURCC( p_box->data.p_ftyp->i_major_brand ); MP4_GET4BYTES( p_box->data.p_ftyp->i_minor_version ); if( ( p_box->data.p_ftyp->i_compatible_brands_count = i_read / 4 ) ) { uint32_t *tab = p_box->data.p_ftyp->i_compatible_brands = calloc( p_box->data.p_ftyp->i_compatible_brands_count, sizeof(uint32_t)); if( unlikely( tab == NULL ) ) MP4_READBOX_EXIT( 0 ); for( unsigned i = 0; i < p_box->data.p_ftyp->i_compatible_brands_count; i++ ) { MP4_GETFOURCC( tab[i] ); } } else { 
p_box->data.p_ftyp->i_compatible_brands = NULL; } MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_ftyp( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_ftyp->i_compatible_brands ); } static int MP4_ReadBox_mvhd( stream_t *p_stream, MP4_Box_t *p_box ) { #ifdef MP4_VERBOSE char s_creation_time[128]; char s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_mvhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mvhd ); if( p_box->data.p_mvhd->i_version ) { MP4_GET8BYTES( p_box->data.p_mvhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_mvhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_timescale ); MP4_GET8BYTES( p_box->data.p_mvhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_mvhd->i_creation_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_timescale ); MP4_GET4BYTES( p_box->data.p_mvhd->i_duration ); } MP4_GET4BYTES( p_box->data.p_mvhd->i_rate ); MP4_GET2BYTES( p_box->data.p_mvhd->i_volume ); MP4_GET2BYTES( p_box->data.p_mvhd->i_reserved1 ); for( unsigned i = 0; i < 2; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_reserved2[i] ); } for( unsigned i = 0; i < 9; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_matrix[i] ); } for( unsigned i = 0; i < 6; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_predefined[i] ); } MP4_GET4BYTES( p_box->data.p_mvhd->i_next_track_id ); #ifdef MP4_VERBOSE MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mvhd->i_creation_time, false ); MP4_ConvertDate2Str( s_modification_time, p_box->data.p_mvhd->i_modification_time, false ); if( p_box->data.p_mvhd->i_rate ) { MP4_ConvertDate2Str( s_duration, p_box->data.p_mvhd->i_duration / p_box->data.p_mvhd->i_rate, true ); } else { s_duration[0] = 0; } msg_Dbg( p_stream, "read box: \"mvhd\" creation %s modification %s time scale %d duration %s rate %f volume %f next track id %d", s_creation_time, s_modification_time, (uint32_t)p_box->data.p_mvhd->i_timescale, s_duration, (float)p_box->data.p_mvhd->i_rate 
/ (1<<16 ), (float)p_box->data.p_mvhd->i_volume / 256 , (uint32_t)p_box->data.p_mvhd->i_next_track_id ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mfhd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_mfhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mvhd ); MP4_GET4BYTES( p_box->data.p_mfhd->i_sequence_number ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"mfhd\" sequence number %d", p_box->data.p_mfhd->i_sequence_number ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tfxd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfxd_t ); MP4_Box_data_tfxd_t *p_tfxd_data = p_box->data.p_tfxd; MP4_GETVERSIONFLAGS( p_tfxd_data ); if( p_tfxd_data->i_version == 0 ) { MP4_GET4BYTES( p_tfxd_data->i_fragment_abs_time ); MP4_GET4BYTES( p_tfxd_data->i_fragment_duration ); } else { MP4_GET8BYTES( p_tfxd_data->i_fragment_abs_time ); MP4_GET8BYTES( p_tfxd_data->i_fragment_duration ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"tfxd\" version %d, flags 0x%x, "\ "fragment duration %"PRIu64", fragment abs time %"PRIu64, p_tfxd_data->i_version, p_tfxd_data->i_flags, p_tfxd_data->i_fragment_duration, p_tfxd_data->i_fragment_abs_time ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tfrf( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfxd_t ); MP4_Box_data_tfrf_t *p_tfrf_data = p_box->data.p_tfrf; MP4_GETVERSIONFLAGS( p_tfrf_data ); MP4_GET1BYTE( p_tfrf_data->i_fragment_count ); p_tfrf_data->p_tfrf_data_fields = calloc( p_tfrf_data->i_fragment_count, sizeof( TfrfBoxDataFields_t ) ); if( !p_tfrf_data->p_tfrf_data_fields ) MP4_READBOX_EXIT( 0 ); for( uint8_t i = 0; i < p_tfrf_data->i_fragment_count; i++ ) { TfrfBoxDataFields_t *TfrfBoxDataField = &p_tfrf_data->p_tfrf_data_fields[i]; if( p_tfrf_data->i_version == 0 ) { MP4_GET4BYTES( TfrfBoxDataField->i_fragment_abs_time ); MP4_GET4BYTES( TfrfBoxDataField->i_fragment_duration ); } else { MP4_GET8BYTES( 
TfrfBoxDataField->i_fragment_abs_time ); MP4_GET8BYTES( TfrfBoxDataField->i_fragment_duration ); } } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"tfrf\" version %d, flags 0x%x, "\ "fragment count %"PRIu8, p_tfrf_data->i_version, p_tfrf_data->i_flags, p_tfrf_data->i_fragment_count ); for( uint8_t i = 0; i < p_tfrf_data->i_fragment_count; i++ ) { TfrfBoxDataFields_t *TfrfBoxDataField = &p_tfrf_data->p_tfrf_data_fields[i]; msg_Dbg( p_stream, "\"tfrf\" fragment duration %"PRIu64", "\ "fragment abs time %"PRIu64, TfrfBoxDataField->i_fragment_duration, TfrfBoxDataField->i_fragment_abs_time ); } #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_tfrf( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_tfrf->p_tfrf_data_fields ); } static int MP4_ReadBox_stra( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stra_t ); MP4_Box_data_stra_t *p_stra = p_box->data.p_stra; uint8_t i_reserved; VLC_UNUSED(i_reserved); MP4_GET1BYTE( p_stra->i_es_cat ); MP4_GET1BYTE( i_reserved ); MP4_GET2BYTES( p_stra->i_track_ID ); MP4_GET4BYTES( p_stra->i_timescale ); MP4_GET8BYTES( p_stra->i_duration ); MP4_GET4BYTES( p_stra->FourCC ); MP4_GET4BYTES( p_stra->Bitrate ); MP4_GET4BYTES( p_stra->MaxWidth ); MP4_GET4BYTES( p_stra->MaxHeight ); MP4_GET4BYTES( p_stra->SamplingRate ); MP4_GET4BYTES( p_stra->Channels ); MP4_GET4BYTES( p_stra->BitsPerSample ); MP4_GET4BYTES( p_stra->AudioTag ); MP4_GET2BYTES( p_stra->nBlockAlign ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( p_stra->cpd_len ); if( p_stra->cpd_len > i_read ) goto error; p_stra->CodecPrivateData = malloc( p_stra->cpd_len ); if( unlikely( p_stra->CodecPrivateData == NULL ) ) goto error; memcpy( p_stra->CodecPrivateData, p_peek, p_stra->cpd_len ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "es_cat is %"PRIu8", birate is %"PRIu32, p_stra->i_es_cat, p_stra->Bitrate ); #endif MP4_READBOX_EXIT( 1 ); error: MP4_READBOX_EXIT( 0 ); } static void MP4_FreeBox_stra( MP4_Box_t 
*p_box ) { FREENULL( p_box->data.p_stra->CodecPrivateData ); } static int MP4_ReadBox_uuid( stream_t *p_stream, MP4_Box_t *p_box ) { if( !CmpUUID( &p_box->i_uuid, &TfrfBoxUUID ) ) return MP4_ReadBox_tfrf( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &TfxdBoxUUID ) ) return MP4_ReadBox_tfxd( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &SmooBoxUUID ) ) return MP4_ReadBoxContainer( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &StraBoxUUID ) ) return MP4_ReadBox_stra( p_stream, p_box ); msg_Warn( p_stream, "Unknown uuid type box" ); return 1; } static void MP4_FreeBox_uuid( MP4_Box_t *p_box ) { if( !CmpUUID( &p_box->i_uuid, &TfrfBoxUUID ) ) return MP4_FreeBox_tfrf( p_box ); if( !CmpUUID( &p_box->i_uuid, &TfxdBoxUUID ) ) return MP4_FreeBox_Common( p_box ); if( !CmpUUID( &p_box->i_uuid, &SmooBoxUUID ) ) return MP4_FreeBox_Common( p_box ); if( !CmpUUID( &p_box->i_uuid, &StraBoxUUID ) ) return MP4_FreeBox_stra( p_box ); } static int MP4_ReadBox_sidx( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_sidx_t ); MP4_Box_data_sidx_t *p_sidx_data = p_box->data.p_sidx; MP4_GETVERSIONFLAGS( p_sidx_data ); MP4_GET4BYTES( p_sidx_data->i_reference_ID ); MP4_GET4BYTES( p_sidx_data->i_timescale ); if( p_sidx_data->i_version == 0 ) { MP4_GET4BYTES( p_sidx_data->i_earliest_presentation_time ); MP4_GET4BYTES( p_sidx_data->i_first_offset ); } else { MP4_GET8BYTES( p_sidx_data->i_earliest_presentation_time ); MP4_GET8BYTES( p_sidx_data->i_first_offset ); } uint16_t i_reserved; VLC_UNUSED(i_reserved); MP4_GET2BYTES( i_reserved ); MP4_GET2BYTES( p_sidx_data->i_reference_count ); uint16_t i_count = p_sidx_data->i_reference_count; p_sidx_data->p_items = calloc( i_count, sizeof( MP4_Box_sidx_item_t ) ); uint32_t tmp; for( unsigned i = 0; i < i_count; i++ ) { MP4_GET4BYTES( tmp ); p_sidx_data->p_items[i].b_reference_type = (bool)((tmp & 0x80000000)>>24); p_sidx_data->p_items[i].i_referenced_size = tmp & 0x7fffffff; MP4_GET4BYTES( 
p_sidx_data->p_items[i].i_subsegment_duration ); MP4_GET4BYTES( tmp ); p_sidx_data->p_items[i].b_starts_with_SAP = (bool)((tmp & 0x80000000)>>24); p_sidx_data->p_items[i].i_SAP_type = (tmp & 0x70000000)>>24; p_sidx_data->p_items[i].i_SAP_delta_time = tmp & 0xfffffff; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"sidx\" version %d, flags 0x%x, "\ "ref_ID %"PRIu32", timescale %"PRIu32", ref_count %"PRIu16", "\ "first subsegmt duration %"PRIu32, p_sidx_data->i_version, p_sidx_data->i_flags, p_sidx_data->i_reference_ID, p_sidx_data->i_timescale, p_sidx_data->i_reference_count, p_sidx_data->p_items[0].i_subsegment_duration ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_sidx( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_sidx->p_items ); } static int MP4_ReadBox_tfhd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_tfhd ); if( p_box->data.p_tfhd->i_version != 0 ) { msg_Warn( p_stream, "'tfhd' box with version != 0. "\ " Don't know what to do with that, please patch" ); MP4_READBOX_EXIT( 0 ); } MP4_GET4BYTES( p_box->data.p_tfhd->i_track_ID ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DURATION_IS_EMPTY ) { msg_Dbg( p_stream, "'duration-is-empty' flag is present "\ "=> no samples for this time interval." 
); p_box->data.p_tfhd->b_empty = true; } else p_box->data.p_tfhd->b_empty = false; if( p_box->data.p_tfhd->i_flags & MP4_TFHD_BASE_DATA_OFFSET ) MP4_GET8BYTES( p_box->data.p_tfhd->i_base_data_offset ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_SAMPLE_DESC_INDEX ) MP4_GET4BYTES( p_box->data.p_tfhd->i_sample_description_index ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_DURATION ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_duration ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_SIZE ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_size ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_FLAGS ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_flags ); #ifdef MP4_VERBOSE char psz_base[128] = "\0"; char psz_desc[128] = "\0"; char psz_dura[128] = "\0"; char psz_size[128] = "\0"; char psz_flag[128] = "\0"; if( p_box->data.p_tfhd->i_flags & MP4_TFHD_BASE_DATA_OFFSET ) snprintf(psz_base, sizeof(psz_base), "base offset %"PRId64, p_box->data.p_tfhd->i_base_data_offset); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_SAMPLE_DESC_INDEX ) snprintf(psz_desc, sizeof(psz_desc), "sample description index %d", p_box->data.p_tfhd->i_sample_description_index); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_DURATION ) snprintf(psz_dura, sizeof(psz_dura), "sample duration %d", p_box->data.p_tfhd->i_default_sample_duration); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_SIZE ) snprintf(psz_size, sizeof(psz_size), "sample size %d", p_box->data.p_tfhd->i_default_sample_size); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_FLAGS ) snprintf(psz_flag, sizeof(psz_flag), "sample flags 0x%x", p_box->data.p_tfhd->i_default_sample_flags); msg_Dbg( p_stream, "read box: \"tfhd\" version %d flags 0x%x track ID %d %s %s %s %s %s", p_box->data.p_tfhd->i_version, p_box->data.p_tfhd->i_flags, p_box->data.p_tfhd->i_track_ID, psz_base, psz_desc, psz_dura, psz_size, psz_flag ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_trun( 
stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_trun_t ); MP4_GETVERSIONFLAGS( p_box->data.p_trun ); MP4_GET4BYTES( p_box->data.p_trun->i_sample_count ); if( p_box->data.p_trun->i_flags & MP4_TRUN_DATA_OFFSET ) MP4_GET4BYTES( p_box->data.p_trun->i_data_offset ); if( p_box->data.p_trun->i_flags & MP4_TRUN_FIRST_FLAGS ) MP4_GET4BYTES( p_box->data.p_trun->i_first_sample_flags ); p_box->data.p_trun->p_samples = calloc( p_box->data.p_trun->i_sample_count, sizeof(MP4_descriptor_trun_sample_t) ); if ( p_box->data.p_trun->p_samples == NULL ) MP4_READBOX_EXIT( 0 ); for( unsigned int i = 0; i<p_box->data.p_trun->i_sample_count; i++ ) { MP4_descriptor_trun_sample_t *p_sample = &p_box->data.p_trun->p_samples[i]; if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_DURATION ) MP4_GET4BYTES( p_sample->i_duration ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_SIZE ) MP4_GET4BYTES( p_sample->i_size ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_FLAGS ) MP4_GET4BYTES( p_sample->i_flags ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_TIME_OFFSET ) MP4_GET4BYTES( p_sample->i_composition_time_offset ); } #ifdef MP4_ULTRA_VERBOSE msg_Dbg( p_stream, "read box: \"trun\" version %u flags 0x%x sample count %u", p_box->data.p_trun->i_version, p_box->data.p_trun->i_flags, p_box->data.p_trun->i_sample_count ); for( unsigned int i = 0; i<p_box->data.p_trun->i_sample_count; i++ ) { MP4_descriptor_trun_sample_t *p_sample = &p_box->data.p_trun->p_samples[i]; msg_Dbg( p_stream, "read box: \"trun\" sample %4.4u flags 0x%x "\ "duration %"PRIu32" size %"PRIu32" composition time offset %"PRIu32, i, p_sample->i_flags, p_sample->i_duration, p_sample->i_size, p_sample->i_composition_time_offset ); } #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_trun( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_trun->p_samples ); } static int MP4_ReadBox_tkhd( stream_t *p_stream, MP4_Box_t *p_box ) { #ifdef MP4_VERBOSE char s_creation_time[128]; char 
s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_tkhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_tkhd ); if( p_box->data.p_tkhd->i_version ) { MP4_GET8BYTES( p_box->data.p_tkhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_tkhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_track_ID ); MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved ); MP4_GET8BYTES( p_box->data.p_tkhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_tkhd->i_creation_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_track_ID ); MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved ); MP4_GET4BYTES( p_box->data.p_tkhd->i_duration ); } for( unsigned i = 0; i < 2; i++ ) { MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved2[i] ); } MP4_GET2BYTES( p_box->data.p_tkhd->i_layer ); MP4_GET2BYTES( p_box->data.p_tkhd->i_predefined ); MP4_GET2BYTES( p_box->data.p_tkhd->i_volume ); MP4_GET2BYTES( p_box->data.p_tkhd->i_reserved3 ); for( unsigned i = 0; i < 9; i++ ) { MP4_GET4BYTES( p_box->data.p_tkhd->i_matrix[i] ); } MP4_GET4BYTES( p_box->data.p_tkhd->i_width ); MP4_GET4BYTES( p_box->data.p_tkhd->i_height ); double rotation; //angle in degrees to be rotated clockwise double scale[2]; // scale factor; sx = scale[0] , sy = scale[1] double translate[2];// amount to translate; tx = translate[0] , ty = translate[1] int32_t *matrix = p_box->data.p_tkhd->i_matrix; translate[0] = conv_fx(matrix[6]); translate[1] = conv_fx(matrix[7]); scale[0] = sqrt(conv_fx(matrix[0]) * conv_fx(matrix[0]) + conv_fx(matrix[3]) * conv_fx(matrix[3])); scale[1] = sqrt(conv_fx(matrix[1]) * conv_fx(matrix[1]) + conv_fx(matrix[4]) * conv_fx(matrix[4])); rotation = atan2(conv_fx(matrix[1]) / scale[1], conv_fx(matrix[0]) / scale[0]) * 180 / M_PI; if (rotation < 0) rotation += 360.; p_box->data.p_tkhd->f_rotation = rotation; #ifdef MP4_VERBOSE MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mvhd->i_creation_time, false ); MP4_ConvertDate2Str( 
s_modification_time, p_box->data.p_mvhd->i_modification_time, false ); MP4_ConvertDate2Str( s_duration, p_box->data.p_mvhd->i_duration, true ); msg_Dbg( p_stream, "read box: \"tkhd\" creation %s modification %s duration %s track ID %d layer %d volume %f rotation %f scaleX %f scaleY %f translateX %f translateY %f width %f height %f. " "Matrix: %i %i %i %i %i %i %i %i %i", s_creation_time, s_modification_time, s_duration, p_box->data.p_tkhd->i_track_ID, p_box->data.p_tkhd->i_layer, (float)p_box->data.p_tkhd->i_volume / 256 , rotation, scale[0], scale[1], translate[0], translate[1], (float)p_box->data.p_tkhd->i_width / BLOCK16x16, (float)p_box->data.p_tkhd->i_height / BLOCK16x16, p_box->data.p_tkhd->i_matrix[0], p_box->data.p_tkhd->i_matrix[1], p_box->data.p_tkhd->i_matrix[2], p_box->data.p_tkhd->i_matrix[3], p_box->data.p_tkhd->i_matrix[4], p_box->data.p_tkhd->i_matrix[5], p_box->data.p_tkhd->i_matrix[6], p_box->data.p_tkhd->i_matrix[7], p_box->data.p_tkhd->i_matrix[8] ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_load( stream_t *p_stream, MP4_Box_t *p_box ) { if ( p_box->i_size != 24 ) return 0; MP4_READBOX_ENTER( MP4_Box_data_load_t ); MP4_GET4BYTES( p_box->data.p_load->i_start_time ); MP4_GET4BYTES( p_box->data.p_load->i_duration ); MP4_GET4BYTES( p_box->data.p_load->i_flags ); MP4_GET4BYTES( p_box->data.p_load->i_hints ); MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mdhd( stream_t *p_stream, MP4_Box_t *p_box ) { uint16_t i_language; #ifdef MP4_VERBOSE char s_creation_time[128]; char s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_mdhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mdhd ); if( p_box->data.p_mdhd->i_version ) { MP4_GET8BYTES( p_box->data.p_mdhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_mdhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mdhd->i_timescale ); MP4_GET8BYTES( p_box->data.p_mdhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_mdhd->i_creation_time ); MP4_GET4BYTES( 
                       p_box->data.p_mdhd->i_modification_time );
        MP4_GET4BYTES( p_box->data.p_mdhd->i_timescale );
        MP4_GET4BYTES( p_box->data.p_mdhd->i_duration );
    }

    /* Packed 15-bit ISO-639 / Mac language code; decoded into rgs_language. */
    MP4_GET2BYTES( i_language );
    decodeQtLanguageCode( i_language, p_box->data.p_mdhd->rgs_language,
                          &p_box->data.p_mdhd->b_mac_encoding );

    MP4_GET2BYTES( p_box->data.p_mdhd->i_quality );

#ifdef MP4_VERBOSE
    MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mdhd->i_creation_time, false );
    MP4_ConvertDate2Str( s_modification_time, p_box->data.p_mdhd->i_modification_time, false );
    MP4_ConvertDate2Str( s_duration, p_box->data.p_mdhd->i_duration, true );
    msg_Dbg( p_stream, "read box: \"mdhd\" creation %s modification %s time scale %d duration %s language %3.3s",
                  s_creation_time,
                  s_modification_time,
                  (uint32_t)p_box->data.p_mdhd->i_timescale,
                  s_duration,
                  (char*) &p_box->data.p_mdhd->rgs_language );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "hdlr" (handler reference) box: handler type fourcc plus an
 * optional name, either Pascal-style (QuickTime 'mhlr') or NUL-terminated. */
static int MP4_ReadBox_hdlr( stream_t *p_stream, MP4_Box_t *p_box )
{
    int32_t i_reserved;
    VLC_UNUSED(i_reserved);

    MP4_READBOX_ENTER( MP4_Box_data_hdlr_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_hdlr );

    MP4_GETFOURCC( p_box->data.p_hdlr->i_predefined );
    MP4_GETFOURCC( p_box->data.p_hdlr->i_handler_type );

    MP4_GET4BYTES( i_reserved );
    MP4_GET4BYTES( i_reserved );
    MP4_GET4BYTES( i_reserved );
    p_box->data.p_hdlr->psz_name = NULL;

    if( i_read > 0 )
    {
        /* +1 for the forced NUL terminator below. */
        uint8_t *psz = p_box->data.p_hdlr->psz_name = malloc( i_read + 1 );
        if( unlikely( psz == NULL ) )
            MP4_READBOX_EXIT( 0 );

        /* Yes, I love .mp4 :( */
        if( p_box->data.p_hdlr->i_predefined == VLC_FOURCC( 'm', 'h', 'l', 'r' ) )
        {
            /* QuickTime variant: name is a Pascal string (length byte first). */
            uint8_t i_len;
            int i_copy;

            MP4_GET1BYTE( i_len );
            i_copy = __MIN( i_read, i_len );

            memcpy( psz, p_peek, i_copy );
            p_box->data.p_hdlr->psz_name[i_copy] = '\0';
        }
        else
        {
            memcpy( psz, p_peek, i_read );
            p_box->data.p_hdlr->psz_name[i_read] = '\0';
        }
    }

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "read box: \"hdlr\" handler type: \"%4.4s\" name: \"%s\"",
                   (char*)&p_box->data.p_hdlr->i_handler_type, p_box->data.p_hdlr->psz_name );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Release the handler name of a "hdlr" box. */
static void MP4_FreeBox_hdlr( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_hdlr->psz_name );
}

/* Read a "vmhd" (video media header) box: graphics mode and opcolor. */
static int MP4_ReadBox_vmhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_vmhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_vmhd );

    MP4_GET2BYTES( p_box->data.p_vmhd->i_graphics_mode );
    for( unsigned i = 0; i < 3; i++ )
    {
        MP4_GET2BYTES( p_box->data.p_vmhd->i_opcolor[i] );
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"vmhd\" graphics-mode %d opcolor (%d, %d, %d)",
                      p_box->data.p_vmhd->i_graphics_mode,
                      p_box->data.p_vmhd->i_opcolor[0],
                      p_box->data.p_vmhd->i_opcolor[1],
                      p_box->data.p_vmhd->i_opcolor[2] );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "smhd" (sound media header) box: 8.8 fixed-point stereo balance. */
static int MP4_ReadBox_smhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_smhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_smhd );

    MP4_GET2BYTES( p_box->data.p_smhd->i_balance );

    MP4_GET2BYTES( p_box->data.p_smhd->i_reserved );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"smhd\" balance %f",
                      (float)p_box->data.p_smhd->i_balance / 256 );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "hmhd" (hint media header) box: PDU sizes and bitrates. */
static int MP4_ReadBox_hmhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_hmhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_hmhd );

    MP4_GET2BYTES( p_box->data.p_hmhd->i_max_PDU_size );
    MP4_GET2BYTES( p_box->data.p_hmhd->i_avg_PDU_size );

    MP4_GET4BYTES( p_box->data.p_hmhd->i_max_bitrate );
    MP4_GET4BYTES( p_box->data.p_hmhd->i_avg_bitrate );

    MP4_GET4BYTES( p_box->data.p_hmhd->i_reserved );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"hmhd\" maxPDU-size %d avgPDU-size %d max-bitrate %d avg-bitrate %d",
                      p_box->data.p_hmhd->i_max_PDU_size,
                      p_box->data.p_hmhd->i_avg_PDU_size,
                      p_box->data.p_hmhd->i_max_bitrate,
                      p_box->data.p_hmhd->i_avg_bitrate );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "url " data entry box: a NUL-terminated location string. */
static int MP4_ReadBox_url( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_url_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_url );
    MP4_GETSTRINGZ( p_box->data.p_url->psz_location );

#ifdef MP4_VERBOSE
msg_Dbg( p_stream, "read box: \"url\" url: %s", p_box->data.p_url->psz_location ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_url( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_url->psz_location ); } static int MP4_ReadBox_urn( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_urn_t ); MP4_GETVERSIONFLAGS( p_box->data.p_urn ); MP4_GETSTRINGZ( p_box->data.p_urn->psz_name ); MP4_GETSTRINGZ( p_box->data.p_urn->psz_location ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"urn\" name %s location %s", p_box->data.p_urn->psz_name, p_box->data.p_urn->psz_location ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_urn( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_urn->psz_name ); FREENULL( p_box->data.p_urn->psz_location ); } static int MP4_ReadBox_dref( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_dref_t ); MP4_GETVERSIONFLAGS( p_box->data.p_dref ); MP4_GET4BYTES( p_box->data.p_dref->i_entry_count ); stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 ); MP4_ReadBoxContainerRaw( p_stream, p_box ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"dref\" entry-count %d", p_box->data.p_dref->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stts( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stts->pi_sample_count ); FREENULL( p_box->data.p_stts->pi_sample_delta ); } static int MP4_ReadBox_stts( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stts_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stts ); MP4_GET4BYTES( p_box->data.p_stts->i_entry_count ); p_box->data.p_stts->pi_sample_count = calloc( p_box->data.p_stts->i_entry_count, sizeof(uint32_t) ); p_box->data.p_stts->pi_sample_delta = calloc( p_box->data.p_stts->i_entry_count, sizeof(int32_t) ); if( p_box->data.p_stts->pi_sample_count == NULL || p_box->data.p_stts->pi_sample_delta == NULL ) { MP4_READBOX_EXIT( 0 ); } uint32_t i = 0; for( ; (i < p_box->data.p_stts->i_entry_count )&&( i_read 
>=8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stts->pi_sample_count[i] ); MP4_GET4BYTES( p_box->data.p_stts->pi_sample_delta[i] ); } if ( i < p_box->data.p_stts->i_entry_count ) p_box->data.p_stts->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stts\" entry-count %d", p_box->data.p_stts->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_ctts( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_ctts->pi_sample_count ); FREENULL( p_box->data.p_ctts->pi_sample_offset ); } static int MP4_ReadBox_ctts( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ctts_t ); MP4_GETVERSIONFLAGS( p_box->data.p_ctts ); MP4_GET4BYTES( p_box->data.p_ctts->i_entry_count ); p_box->data.p_ctts->pi_sample_count = calloc( p_box->data.p_ctts->i_entry_count, sizeof(uint32_t) ); p_box->data.p_ctts->pi_sample_offset = calloc( p_box->data.p_ctts->i_entry_count, sizeof(int32_t) ); if( ( p_box->data.p_ctts->pi_sample_count == NULL ) || ( p_box->data.p_ctts->pi_sample_offset == NULL ) ) { MP4_READBOX_EXIT( 0 ); } uint32_t i = 0; for( ; (i < p_box->data.p_ctts->i_entry_count )&&( i_read >=8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_ctts->pi_sample_count[i] ); MP4_GET4BYTES( p_box->data.p_ctts->pi_sample_offset[i] ); } if ( i < p_box->data.p_ctts->i_entry_count ) p_box->data.p_ctts->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"ctts\" entry-count %d", p_box->data.p_ctts->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadLengthDescriptor( uint8_t **pp_peek, int64_t *i_read ) { unsigned int i_b; unsigned int i_len = 0; do { i_b = **pp_peek; (*pp_peek)++; (*i_read)--; i_len = ( i_len << 7 ) + ( i_b&0x7f ); } while( i_b&0x80 ); return( i_len ); } static void MP4_FreeBox_esds( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_esds->es_descriptor.psz_URL ); if( p_box->data.p_esds->es_descriptor.p_decConfigDescr ) { FREENULL( p_box->data.p_esds->es_descriptor.p_decConfigDescr->p_decoder_specific_info ); FREENULL( 
                  p_box->data.p_esds->es_descriptor.p_decConfigDescr );
    }
}

/* Read an "esds" box: MPEG-4 ES descriptor, optional DecoderConfig
 * descriptor and optional DecoderSpecificInfo blob. */
static int MP4_ReadBox_esds( stream_t *p_stream, MP4_Box_t *p_box )
{
#define es_descriptor p_box->data.p_esds->es_descriptor
    unsigned int i_len;
    unsigned int i_flags;
    unsigned int i_type;

    MP4_READBOX_ENTER( MP4_Box_data_esds_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_esds );

    MP4_GET1BYTE( i_type );
    if( i_type == 0x03 ) /* MP4ESDescrTag ISO/IEC 14496-1 8.3.3 */
    {
        i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "found esds MPEG4ESDescr (%dBytes)",
                 i_len );
#endif

        MP4_GET2BYTES( es_descriptor.i_ES_ID );
        MP4_GET1BYTE( i_flags );
        es_descriptor.b_stream_dependence = ( (i_flags&0x80) != 0);
        es_descriptor.b_url = ( (i_flags&0x40) != 0);
        es_descriptor.b_OCRstream = ( (i_flags&0x20) != 0);

        es_descriptor.i_stream_priority = i_flags&0x1f;

        if( es_descriptor.b_stream_dependence )
        {
            MP4_GET2BYTES( es_descriptor.i_depend_on_ES_ID );
        }

        if( es_descriptor.b_url )
        {
            unsigned int i_len;

            MP4_GET1BYTE( i_len );
            /* Clamp the URL length to the remaining payload. */
            i_len = __MIN(i_read, i_len);

            es_descriptor.psz_URL = malloc( i_len + 1 );
            if( es_descriptor.psz_URL )
            {
                memcpy( es_descriptor.psz_URL, p_peek, i_len );
                es_descriptor.psz_URL[i_len] = 0;
            }
            p_peek += i_len;
            i_read -= i_len;
        }
        else
        {
            es_descriptor.psz_URL = NULL;
        }

        if( es_descriptor.b_OCRstream )
        {
            MP4_GET2BYTES( es_descriptor.i_OCR_ES_ID );
        }

        MP4_GET1BYTE( i_type ); /* get next type */
    }

    if( i_type != 0x04)/* MP4DecConfigDescrTag ISO/IEC 14496-1 8.3.4 */
    {
         es_descriptor.p_decConfigDescr = NULL;
         MP4_READBOX_EXIT( 1 ); /* rest isn't interesting up to now */
    }

    i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "found esds MP4DecConfigDescr (%dBytes)",
             i_len );
#endif

    es_descriptor.p_decConfigDescr =
            calloc( 1, sizeof( MP4_descriptor_decoder_config_t ));
    if( unlikely( es_descriptor.p_decConfigDescr == NULL ) )
        MP4_READBOX_EXIT( 0 );

    MP4_GET1BYTE( es_descriptor.p_decConfigDescr->i_objectProfileIndication );
    MP4_GET1BYTE( i_flags );
    es_descriptor.p_decConfigDescr->i_streamType = i_flags >> 2;
    es_descriptor.p_decConfigDescr->b_upStream = ( i_flags >> 1 )&0x01;
    MP4_GET3BYTES( es_descriptor.p_decConfigDescr->i_buffer_sizeDB );
    MP4_GET4BYTES( es_descriptor.p_decConfigDescr->i_max_bitrate );
    MP4_GET4BYTES( es_descriptor.p_decConfigDescr->i_avg_bitrate );

    MP4_GET1BYTE( i_type );
    if( i_type != 0x05 )/* MP4DecSpecificDescrTag ISO/IEC 14496-1 8.3.5 */
    {
        es_descriptor.p_decConfigDescr->i_decoder_specific_info_len = 0;
        es_descriptor.p_decConfigDescr->p_decoder_specific_info  = NULL;
        MP4_READBOX_EXIT( 1 );
    }

    i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "found esds MP4DecSpecificDescr (%dBytes)",
             i_len );
#endif
    /* Reject descriptors that claim more data than remains in the box. */
    if( i_len > i_read )
        MP4_READBOX_EXIT( 0 );

    es_descriptor.p_decConfigDescr->i_decoder_specific_info_len = i_len;
    es_descriptor.p_decConfigDescr->p_decoder_specific_info = malloc( i_len );
    if( unlikely( es_descriptor.p_decConfigDescr->p_decoder_specific_info == NULL ) )
        MP4_READBOX_EXIT( 0 );

    memcpy( es_descriptor.p_decConfigDescr->p_decoder_specific_info,
            p_peek, i_len );

    MP4_READBOX_EXIT( 1 );
#undef es_descriptor
}

/* Release the raw hvcC blob. */
static void MP4_FreeBox_hvcC(MP4_Box_t *p_box )
{
    MP4_Box_data_hvcC_t *p_hvcC =  p_box->data.p_hvcC;
    if( p_hvcC->i_hvcC > 0 ) FREENULL( p_hvcC->p_hvcC) ;
}

/* Read an "hvcC" (HEVC decoder configuration) box: kept as an opaque copy. */
static int MP4_ReadBox_hvcC( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_hvcC_t *p_hvcC;

    MP4_READBOX_ENTER( MP4_Box_data_hvcC_t );
    p_hvcC = p_box->data.p_hvcC;

    p_hvcC->i_hvcC = i_read;
    if( p_hvcC->i_hvcC > 0 )
    {
        uint8_t * p = p_hvcC->p_hvcC = malloc( p_hvcC->i_hvcC );
        if( p )
            memcpy( p, p_peek, i_read );
    }
    MP4_READBOX_EXIT( 1 );
}

/* Release the raw avcC blob and the SPS/PPS arrays. */
static void MP4_FreeBox_avcC( MP4_Box_t *p_box )
{
    MP4_Box_data_avcC_t *p_avcC = p_box->data.p_avcC;
    int i;

    if( p_avcC->i_avcC > 0 ) FREENULL( p_avcC->p_avcC );

    if( p_avcC->sps )
    {
        for( i = 0; i < p_avcC->i_sps; i++ )
            FREENULL( p_avcC->sps[i] );
    }
    if( p_avcC->pps )
    {
        for( i = 0; i < p_avcC->i_pps; i++ )
            FREENULL( p_avcC->pps[i] );
    }
    if( p_avcC->i_sps >
 0 ) FREENULL( p_avcC->sps );
    if( p_avcC->i_sps > 0 ) FREENULL( p_avcC->i_sps_length );
    if( p_avcC->i_pps > 0 ) FREENULL( p_avcC->pps );
    if( p_avcC->i_pps > 0 ) FREENULL( p_avcC->i_pps_length );
}

/* Read an "avcC" (H.264 decoder configuration) box: keeps a raw copy of
 * the whole payload and also splits out the SPS and PPS NAL units. */
static int MP4_ReadBox_avcC( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_avcC_t *p_avcC;
    int i;

    MP4_READBOX_ENTER( MP4_Box_data_avcC_t );
    p_avcC = p_box->data.p_avcC;

    /* Opaque copy of the whole config record. */
    p_avcC->i_avcC = i_read;
    if( p_avcC->i_avcC > 0 )
    {
        uint8_t * p = p_avcC->p_avcC = malloc( p_avcC->i_avcC );
        if( p )
            memcpy( p, p_peek, i_read );
    }

    MP4_GET1BYTE( p_avcC->i_version );
    MP4_GET1BYTE( p_avcC->i_profile );
    MP4_GET1BYTE( p_avcC->i_profile_compatibility );
    MP4_GET1BYTE( p_avcC->i_level );
    MP4_GET1BYTE( p_avcC->i_reserved1 );
    /* Low 2 bits encode NAL length size minus one. */
    p_avcC->i_length_size = (p_avcC->i_reserved1&0x03) + 1;
    p_avcC->i_reserved1 >>= 2;

    MP4_GET1BYTE( p_avcC->i_reserved2 );
    p_avcC->i_sps = p_avcC->i_reserved2&0x1f;
    p_avcC->i_reserved2 >>= 5;

    if( p_avcC->i_sps > 0 )
    {
        p_avcC->i_sps_length = calloc( p_avcC->i_sps, sizeof( uint16_t ) );
        p_avcC->sps = calloc( p_avcC->i_sps, sizeof( uint8_t* ) );

        if( !p_avcC->i_sps_length || !p_avcC->sps )
            goto error;

        /* Each SPS: 2-byte length then payload; lengths are bound-checked. */
        for( i = 0; i < p_avcC->i_sps && i_read > 2; i++ )
        {
            MP4_GET2BYTES( p_avcC->i_sps_length[i] );
            if ( p_avcC->i_sps_length[i] > i_read )
                goto error;
            p_avcC->sps[i] = malloc( p_avcC->i_sps_length[i] );
            if( p_avcC->sps[i] )
                memcpy( p_avcC->sps[i], p_peek, p_avcC->i_sps_length[i] );

            p_peek += p_avcC->i_sps_length[i];
            i_read -= p_avcC->i_sps_length[i];
        }
        if ( i != p_avcC->i_sps )
            goto error;
    }

    MP4_GET1BYTE( p_avcC->i_pps );
    if( p_avcC->i_pps > 0 )
    {
        p_avcC->i_pps_length = calloc( p_avcC->i_pps, sizeof( uint16_t ) );
        p_avcC->pps = calloc( p_avcC->i_pps, sizeof( uint8_t* ) );

        if( !p_avcC->i_pps_length || !p_avcC->pps )
            goto error;

        /* Each PPS: 2-byte length then payload; lengths are bound-checked. */
        for( i = 0; i < p_avcC->i_pps && i_read > 2; i++ )
        {
            MP4_GET2BYTES( p_avcC->i_pps_length[i] );
            if( p_avcC->i_pps_length[i] > i_read )
                goto error;
            p_avcC->pps[i] = malloc( p_avcC->i_pps_length[i] );
            if( p_avcC->pps[i] )
                memcpy( p_avcC->pps[i], p_peek, p_avcC->i_pps_length[i] );

            p_peek += p_avcC->i_pps_length[i];
            i_read -= p_avcC->i_pps_length[i];
        }
        if ( i != p_avcC->i_pps )
            goto error;
    }
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"avcC\" version=%d profile=0x%x level=0x%x length size=%d sps=%d pps=%d",
             p_avcC->i_version, p_avcC->i_profile, p_avcC->i_level,
             p_avcC->i_length_size,
             p_avcC->i_sps, p_avcC->i_pps );
    for( i = 0; i < p_avcC->i_sps; i++ )
    {
        msg_Dbg( p_stream, " - sps[%d] length=%d",
                 i, p_avcC->i_sps_length[i] );
    }
    for( i = 0; i < p_avcC->i_pps; i++ )
    {
        msg_Dbg( p_stream, " - pps[%d] length=%d",
                 i, p_avcC->i_pps_length[i] );
    }

#endif
    MP4_READBOX_EXIT( 1 );

error:
    MP4_FreeBox_avcC( p_box );
    MP4_READBOX_EXIT( 0 );
}

/* Read a "WMA2" box: little-endian WAVEFORMATEX plus codec extradata. */
static int MP4_ReadBox_WMA2( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_WMA2_t );

    MP4_Box_data_WMA2_t *p_WMA2 = p_box->data.p_WMA2;

    MP4_GET2BYTESLE( p_WMA2->Format.wFormatTag );
    MP4_GET2BYTESLE( p_WMA2->Format.nChannels );
    MP4_GET4BYTESLE( p_WMA2->Format.nSamplesPerSec );
    MP4_GET4BYTESLE( p_WMA2->Format.nAvgBytesPerSec );
    MP4_GET2BYTESLE( p_WMA2->Format.nBlockAlign );
    MP4_GET2BYTESLE( p_WMA2->Format.wBitsPerSample );

    uint16_t i_cbSize;
    MP4_GET2BYTESLE( i_cbSize );

    /* Reject extradata sizes larger than the remaining payload. */
    if ( i_read < 0 || i_cbSize > i_read )
        goto error;

    p_WMA2->i_extra = i_cbSize;
    if ( p_WMA2->i_extra )
    {
        p_WMA2->p_extra = malloc( p_WMA2->i_extra );
        if ( !
p_WMA2->p_extra ) goto error; memcpy( p_WMA2->p_extra, p_peek, p_WMA2->i_extra ); } MP4_READBOX_EXIT( 1 ); error: MP4_READBOX_EXIT( 0 ); } static void MP4_FreeBox_WMA2( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_WMA2->p_extra ); } static int MP4_ReadBox_strf( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_strf_t ); MP4_Box_data_strf_t *p_strf = p_box->data.p_strf; MP4_GET4BYTESLE( p_strf->bmiHeader.biSize ); MP4_GET4BYTESLE( p_strf->bmiHeader.biWidth ); MP4_GET4BYTESLE( p_strf->bmiHeader.biHeight ); MP4_GET2BYTESLE( p_strf->bmiHeader.biPlanes ); MP4_GET2BYTESLE( p_strf->bmiHeader.biBitCount ); MP4_GETFOURCC( p_strf->bmiHeader.biCompression ); MP4_GET4BYTESLE( p_strf->bmiHeader.biSizeImage ); MP4_GET4BYTESLE( p_strf->bmiHeader.biXPelsPerMeter ); MP4_GET4BYTESLE( p_strf->bmiHeader.biYPelsPerMeter ); MP4_GET4BYTESLE( p_strf->bmiHeader.biClrUsed ); MP4_GET4BYTESLE( p_strf->bmiHeader.biClrImportant ); if ( i_read < 0 ) goto error; p_strf->i_extra = i_read; if ( p_strf->i_extra ) { p_strf->p_extra = malloc( p_strf->i_extra ); if ( ! 
p_strf->p_extra ) goto error; memcpy( p_strf->p_extra, p_peek, i_read ); } MP4_READBOX_EXIT( 1 ); error: MP4_READBOX_EXIT( 0 ); } static void MP4_FreeBox_strf( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_strf->p_extra ); } static int MP4_ReadBox_ASF( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ASF_t ); MP4_Box_data_ASF_t *p_asf = p_box->data.p_asf; if (i_read != 8) MP4_READBOX_EXIT( 0 ); MP4_GET1BYTE( p_asf->i_stream_number ); /* remaining is unknown */ MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_stsdext_chan( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_chan_t ); MP4_Box_data_chan_t *p_chan = p_box->data.p_chan; if ( i_read < 16 ) MP4_READBOX_EXIT( 0 ); MP4_GET1BYTE( p_chan->i_version ); MP4_GET3BYTES( p_chan->i_channels_flags ); MP4_GET4BYTES( p_chan->layout.i_channels_layout_tag ); MP4_GET4BYTES( p_chan->layout.i_channels_bitmap ); MP4_GET4BYTES( p_chan->layout.i_channels_description_count ); size_t i_descsize = 8 + 3 * sizeof(float); if ( (size_t)i_read < p_chan->layout.i_channels_description_count * i_descsize ) MP4_READBOX_EXIT( 0 ); p_chan->layout.p_descriptions = malloc( p_chan->layout.i_channels_description_count * i_descsize ); if ( !p_chan->layout.p_descriptions ) MP4_READBOX_EXIT( 0 ); uint32_t i; for( i=0; i<p_chan->layout.i_channels_description_count; i++ ) { if ( i_read < 20 ) break; MP4_GET4BYTES( p_chan->layout.p_descriptions[i].i_channel_label ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].i_channel_flags ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[0] ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[1] ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[2] ); } if ( i<p_chan->layout.i_channels_description_count ) p_chan->layout.i_channels_description_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"chan\" flags=0x%x tag=0x%x bitmap=0x%x descriptions=%u", p_chan->i_channels_flags, p_chan->layout.i_channels_layout_tag, 
             p_chan->layout.i_channels_bitmap,
             p_chan->layout.i_channels_description_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Release the channel description array of a "chan" box. */
static void MP4_FreeBox_stsdext_chan( MP4_Box_t *p_box )
{
    MP4_Box_data_chan_t *p_chan = p_box->data.p_chan;
    free( p_chan->layout.p_descriptions );
}

/* Read a "dec3" (E-AC-3 specific) box: data rate plus per-substream
 * bitstream parameters. */
static int MP4_ReadBox_dec3( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_dec3_t );

    MP4_Box_data_dec3_t *p_dec3 = p_box->data.p_dec3;

    unsigned i_header;
    MP4_GET2BYTES( i_header );

    p_dec3->i_data_rate = i_header >> 3;
    /* 3-bit field encodes count minus one, so at most 8 substreams. */
    p_dec3->i_num_ind_sub = (i_header & 0x7) + 1;
    for (uint8_t i = 0; i < p_dec3->i_num_ind_sub; i++) {
        MP4_GET3BYTES( i_header );
        p_dec3->stream[i].i_fscod = ( i_header >> 22 ) & 0x03;
        p_dec3->stream[i].i_bsid  = ( i_header >> 17 ) & 0x01f;
        p_dec3->stream[i].i_bsmod = ( i_header >> 12 ) & 0x01f;
        p_dec3->stream[i].i_acmod = ( i_header >> 9 ) & 0x07;
        p_dec3->stream[i].i_lfeon = ( i_header >> 8 ) & 0x01;
        p_dec3->stream[i].i_num_dep_sub = (i_header >> 1) & 0x0f;
        if (p_dec3->stream[i].i_num_dep_sub) {
            MP4_GET1BYTE( p_dec3->stream[i].i_chan_loc );
            p_dec3->stream[i].i_chan_loc |= (i_header & 1) << 8;
        } else
            p_dec3->stream[i].i_chan_loc = 0;
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
        "read box: \"dec3\" bitrate %dkbps %d independant substreams",
            p_dec3->i_data_rate, p_dec3->i_num_ind_sub);

    for (uint8_t i = 0; i < p_dec3->i_num_ind_sub; i++)
        msg_Dbg( p_stream,
                "\tstream %d: bsid=0x%x bsmod=0x%x acmod=0x%x lfeon=0x%x "
                "num dependant subs=%d chan_loc=0x%x",
                i, p_dec3->stream[i].i_bsid, p_dec3->stream[i].i_bsmod,
                p_dec3->stream[i].i_acmod, p_dec3->stream[i].i_lfeon,
                p_dec3->stream[i].i_num_dep_sub, p_dec3->stream[i].i_chan_loc );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* Read a "dac3" (AC-3 specific) box: bitstream parameters packed into
 * a 24-bit header. */
static int MP4_ReadBox_dac3( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_dac3_t *p_dac3;
    MP4_READBOX_ENTER( MP4_Box_data_dac3_t );

    p_dac3 = p_box->data.p_dac3;

    unsigned i_header;
    MP4_GET3BYTES( i_header );

    p_dac3->i_fscod = ( i_header >> 22 ) & 0x03;
    p_dac3->i_bsid  = ( i_header >> 17 ) & 0x01f;
    p_dac3->i_bsmod = ( i_header >> 14 ) & 0x07;
    p_dac3->i_acmod = ( i_header >> 11 ) & 0x07;
    p_dac3->i_lfeon = ( i_header >> 10 ) & 0x01;
    p_dac3->i_bitrate_code = ( i_header >> 5) & 0x1f;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"dac3\" fscod=0x%x bsid=0x%x bsmod=0x%x acmod=0x%x lfeon=0x%x bitrate_code=0x%x",
             p_dac3->i_fscod, p_dac3->i_bsid, p_dac3->i_bsmod,
             p_dac3->i_acmod, p_dac3->i_lfeon, p_dac3->i_bitrate_code );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "dvc1" (VC-1 specific) box: profile/level byte plus an opaque
 * copy of the sequence header data. */
static int MP4_ReadBox_dvc1( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_dvc1_t *p_dvc1;

    MP4_READBOX_ENTER( MP4_Box_data_dvc1_t );
    p_dvc1 = p_box->data.p_dvc1;

    MP4_GET1BYTE( p_dvc1->i_profile_level ); /* profile is on 4bits, level 3bits */
    uint8_t i_profile = (p_dvc1->i_profile_level & 0xf0) >> 4;
    /* Only Advanced (0x0c) and Main/Simple (0x06) profiles are handled. */
    if( i_profile != 0x06 && i_profile != 0x0c )
    {
        msg_Warn( p_stream, "unsupported VC-1 profile (%"PRIu8"), please report", i_profile );
        MP4_READBOX_EXIT( 0 );
    }

    p_dvc1->i_vc1 = p_box->i_size - 7; /* Header + profile_level */

    if( p_dvc1->i_vc1 > 0 )
    {
        uint8_t *p = p_dvc1->p_vc1 = malloc( p_dvc1->i_vc1 );
        if( p )
            memcpy( p, p_peek, i_read );
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"dvc1\" profile=%"PRIu8" level=%i",
             i_profile, p_dvc1->i_profile_level & 0x0e >> 1 );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* Read an "enda" (audio endianness) box. */
static int MP4_ReadBox_enda( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_enda_t *p_enda;
    MP4_READBOX_ENTER( MP4_Box_data_enda_t );

    p_enda = p_box->data.p_enda;

    MP4_GET2BYTES( p_enda->i_little_endian );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"enda\" little_endian=%d", p_enda->i_little_endian );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a sound sample description entry ("soun" handler) from an stsd
 * box, including QuickTime SoundDescription V1/V2 extensions. */
static int MP4_ReadBox_sample_soun( stream_t *p_stream, MP4_Box_t *p_box )
{
    p_box->i_handler = ATOM_soun;
    MP4_READBOX_ENTER( MP4_Box_data_sample_soun_t );
    p_box->data.p_sample_soun->p_qt_description = NULL;

    /* Sanity check needed because the "wave" box does also contain an
     * "mp4a" box that we don't understand.
     */
    if( i_read < 28 )
    {
        i_read -= 30;
        MP4_READBOX_EXIT( 1 );
    }

    for( unsigned i = 0; i < 6 ; i++ )
    {
        MP4_GET1BYTE( p_box->data.p_sample_soun->i_reserved1[i] );
    }

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_data_reference_index );

    /*
     * XXX hack -> produce a copy of the nearly complete chunk
     */
    p_box->data.p_sample_soun->i_qt_description = 0;
    p_box->data.p_sample_soun->p_qt_description = NULL;
    if( i_read > 0 )
    {
        p_box->data.p_sample_soun->p_qt_description = malloc( i_read );
        if( p_box->data.p_sample_soun->p_qt_description )
        {
            p_box->data.p_sample_soun->i_qt_description = i_read;
            memcpy( p_box->data.p_sample_soun->p_qt_description, p_peek, i_read );
        }
    }

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_version );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_revision_level );
    MP4_GET4BYTES( p_box->data.p_sample_soun->i_qt_vendor );

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_channelcount );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_samplesize );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_compressionid );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_reserved3 );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratehi );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratelo );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"soun\" stsd qt_version %"PRIu16" compid=%"PRIx16,
             p_box->data.p_sample_soun->i_qt_version,
             p_box->data.p_sample_soun->i_compressionid );
#endif

    if( p_box->data.p_sample_soun->i_qt_version == 1 && i_read >= 16 )
    {
        /* SoundDescriptionV1 */
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_sample_per_packet );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_packet );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_frame );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_sample );

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream,
                 "read box: \"soun\" V1 sample/packet=%d bytes/packet=%d "
                 "bytes/frame=%d bytes/sample=%d",
                 p_box->data.p_sample_soun->i_sample_per_packet,
                 p_box->data.p_sample_soun->i_bytes_per_packet,
                 p_box->data.p_sample_soun->i_bytes_per_frame,
                 p_box->data.p_sample_soun->i_bytes_per_sample );
#endif
        /* Skip to the start of any stsd extension boxes (V1 is 44 bytes). */
        stream_Seek( p_stream, p_box->i_pos +
                        mp4_box_headersize( p_box ) + 44 );
    }
    else if( p_box->data.p_sample_soun->i_qt_version == 2 && i_read >= 36 )
    {
        /* SoundDescriptionV2 */
        double f_sample_rate;
        int64_t i_dummy64;
        uint32_t i_channel, i_extoffset, i_dummy32;

        /* Checks: V2 mandates these sentinel values in the V0 fields. */
        if ( p_box->data.p_sample_soun->i_channelcount != 0x3 ||
             p_box->data.p_sample_soun->i_samplesize != 0x0010 ||
             p_box->data.p_sample_soun->i_compressionid != 0xFFFE ||
             p_box->data.p_sample_soun->i_reserved3 != 0x0 ||
             p_box->data.p_sample_soun->i_sampleratehi != 0x1 ||//65536
             p_box->data.p_sample_soun->i_sampleratelo != 0x0 ) //remainder
        {
            msg_Err( p_stream, "invalid stsd V2 box defaults" );
            MP4_READBOX_EXIT( 0 );
        }
        /* !Checks */

        MP4_GET4BYTES( i_extoffset ); /* offset to stsd extentions */
        MP4_GET8BYTES( i_dummy64 );
        /* The sample rate is stored as an IEEE double in 8 raw bytes. */
        memcpy( &f_sample_rate, &i_dummy64, 8 );
        msg_Dbg( p_stream, "read box: %f Hz", f_sample_rate );
        p_box->data.p_sample_soun->i_sampleratehi = (int)f_sample_rate % BLOCK16x16;
        p_box->data.p_sample_soun->i_sampleratelo = f_sample_rate / BLOCK16x16;

        MP4_GET4BYTES( i_channel );
        p_box->data.p_sample_soun->i_channelcount = i_channel;

        MP4_GET4BYTES( i_dummy32 );
        /* Constant "always 0x7F000000" marker per the QuickTime spec. */
        if ( i_dummy32 != 0x7F000000 )
        {
            msg_Err( p_stream, "invalid stsd V2 box" );
            MP4_READBOX_EXIT( 0 );
        }

        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbitsperchannel );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_formatflags );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbytesperaudiopacket );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket );

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "read box: \"soun\" V2 rate=%f bitsperchannel=%u "
                           "flags=%u bytesperpacket=%u lpcmframesperpacket=%u",
                 f_sample_rate,
                 p_box->data.p_sample_soun->i_constbitsperchannel,
                 p_box->data.p_sample_soun->i_formatflags,
                 p_box->data.p_sample_soun->i_constbytesperaudiopacket,
                 p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket );
#endif
        /* Seek to the declared extension offset, clamped to the box end. */
        if ( i_extoffset < p_box->i_size )
            stream_Seek( p_stream, p_box->i_pos + i_extoffset );
        else
            stream_Seek( p_stream, p_box->i_pos + p_box->i_size );
    }
    else
    {
        p_box->data.p_sample_soun->i_sample_per_packet = 0;
        p_box->data.p_sample_soun->i_bytes_per_packet = 0;
        p_box->data.p_sample_soun->i_bytes_per_frame = 0;
        p_box->data.p_sample_soun->i_bytes_per_sample = 0;

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "read box: \"soun\" V0 or qt1/2 (rest=%"PRId64")",
                 i_read );
#endif
        stream_Seek( p_stream, p_box->i_pos +
                        mp4_box_headersize( p_box ) + 28 );
    }

    if( p_box->i_type == ATOM_drms )
    {
        msg_Warn( p_stream, "DRM protected streams are not supported." );
        MP4_READBOX_EXIT( 0 );
    }

    if( p_box->i_type == ATOM_samr || p_box->i_type == ATOM_sawb )
    {
        /* Ignore channelcount for AMR (3gpp AMRSpecificBox) */
        p_box->data.p_sample_soun->i_channelcount = 1;
    }

    /* Loads extensions */
    MP4_ReadBoxContainerRaw( p_stream, p_box ); /* esds/wave/... */

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"soun\" in stsd channel %d "
             "sample size %d sample rate %f",
             p_box->data.p_sample_soun->i_channelcount,
             p_box->data.p_sample_soun->i_samplesize,
             (float)p_box->data.p_sample_soun->i_sampleratehi +
             (float)p_box->data.p_sample_soun->i_sampleratelo / BLOCK16x16 );

#endif
    MP4_READBOX_EXIT( 1 );
}

/* Release the raw QuickTime description copy of a sound sample entry. */
static void MP4_FreeBox_sample_soun( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sample_soun->p_qt_description );
}

/* Read a video sample description entry ("vide" handler) from an stsd box. */
int MP4_ReadBox_sample_vide( stream_t *p_stream, MP4_Box_t *p_box )
{
    p_box->i_handler = ATOM_vide;
    MP4_READBOX_ENTER( MP4_Box_data_sample_vide_t );

    for( unsigned i = 0; i < 6 ; i++ )
    {
        MP4_GET1BYTE( p_box->data.p_sample_vide->i_reserved1[i] );
    }

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_data_reference_index );

    /*
     * XXX hack -> produce a copy of the nearly complete chunk
     */
    if( i_read > 0 )
    {
        p_box->data.p_sample_vide->p_qt_image_description = malloc( i_read );
        if( unlikely( p_box->data.p_sample_vide->p_qt_image_description == NULL ) )
            MP4_READBOX_EXIT( 0 );
        p_box->data.p_sample_vide->i_qt_image_description = i_read;
        memcpy( p_box->data.p_sample_vide->p_qt_image_description,
                p_peek, i_read );
    }
    else
    {
        p_box->data.p_sample_vide->i_qt_image_description = 0;
        p_box->data.p_sample_vide->p_qt_image_description = NULL;
    }

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_version );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_revision_level );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_vendor );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_temporal_quality );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_spatial_quality );

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_width );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_height );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_horizresolution );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_vertresolution );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_data_size );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_frame_count );

    /* The compressor name is a fixed 32-byte field. */
    if ( i_read < 32 )
        MP4_READBOX_EXIT( 0 );
    memcpy( &p_box->data.p_sample_vide->i_compressorname, p_peek, 32 );
    p_peek += 32; i_read -= 32;

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_depth );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_color_table );

    /* Skip the fixed 78-byte entry to reach any stsd extension boxes. */
    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 78);

    if( p_box->i_type == ATOM_drmi )
    {
        msg_Warn( p_stream, "DRM protected streams are not supported." );
        MP4_READBOX_EXIT( 0 );
    }

    MP4_ReadBoxContainerRaw( p_stream, p_box );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"vide\" in stsd %dx%d depth %d",
                      p_box->data.p_sample_vide->i_width,
                      p_box->data.p_sample_vide->i_height,
                      p_box->data.p_sample_vide->i_depth );

#endif
    MP4_READBOX_EXIT( 1 );
}

/* Release the raw QuickTime image description copy of a video entry. */
void MP4_FreeBox_sample_vide( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sample_vide->p_qt_image_description );
}

/* Read an "mp4s" sample entry: skip the fixed fields and parse children. */
static int MP4_ReadBox_sample_mp4s( stream_t *p_stream, MP4_Box_t *p_box )
{
    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 );
    MP4_ReadBoxContainerRaw( p_stream, p_box );
    return 1;
}

/* Read a QuickTime "text" sample description entry: justification,
 * background color and default text box. */
static int MP4_ReadBox_sample_text( stream_t *p_stream, MP4_Box_t *p_box )
{
    int32_t t;

    p_box->i_handler = ATOM_text;
    MP4_READBOX_ENTER( MP4_Box_data_sample_text_t );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved1 );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_reserved2 );

    MP4_GET2BYTES( p_box->data.p_sample_text->i_data_reference_index );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_display_flags );

    MP4_GET4BYTES( t );
    switch( t )
    {
        /* FIXME search right signification */
        case 1: // Center
            p_box->data.p_sample_text->i_justification_horizontal = 1;
            p_box->data.p_sample_text->i_justification_vertical = 1;
            break;
        case -1: // Flush Right
            p_box->data.p_sample_text->i_justification_horizontal = -1;
            p_box->data.p_sample_text->i_justification_vertical = -1;
            break;
        case -2: // Flush Left
            p_box->data.p_sample_text->i_justification_horizontal = 0;
            p_box->data.p_sample_text->i_justification_vertical = 0;
            break;
        case 0: // Flush Default
        default:
            p_box->data.p_sample_text->i_justification_horizontal = 1;
            p_box->data.p_sample_text->i_justification_vertical = -1;
            break;
    }

    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[0] );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[1] );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[2] );
    p_box->data.p_sample_text->i_background_color[3] = 0xFF;

    MP4_GET2BYTES(
p_box->data.p_sample_text->i_text_box_top ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_left ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_bottom ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_right ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"text\" in stsd text" ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_sample_tx3g( stream_t *p_stream, MP4_Box_t *p_box ) { p_box->i_handler = ATOM_text; MP4_READBOX_ENTER( MP4_Box_data_sample_text_t ); MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved1 ); MP4_GET2BYTES( p_box->data.p_sample_text->i_reserved2 ); MP4_GET2BYTES( p_box->data.p_sample_text->i_data_reference_index ); MP4_GET4BYTES( p_box->data.p_sample_text->i_display_flags ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_justification_horizontal ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_justification_vertical ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[0] ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[1] ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[2] ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[3] ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_top ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_left ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_bottom ); MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_right ); MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved3 ); MP4_GET2BYTES( p_box->data.p_sample_text->i_font_id ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_font_face ); MP4_GET1BYTE ( p_box->data.p_sample_text->i_font_size ); MP4_GET4BYTES( p_box->data.p_sample_text->i_font_color ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"tx3g\" in stsd text" ); #endif MP4_READBOX_EXIT( 1 ); } #if 0 /* We can't easily call it, and anyway ~ 20 bytes lost isn't a real problem */ static void MP4_FreeBox_sample_text( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_sample_text->psz_text_name ); } #endif static 
int MP4_ReadBox_stsd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stsd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsd );

    MP4_GET4BYTES( p_box->data.p_stsd->i_entry_count );

    /* Re-seek past version/flags + entry count, then parse the
     * contained sample entries as raw children. */
    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 );

    MP4_ReadBoxContainerRaw( p_stream, p_box );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"stsd\" entry-count %d",
             p_box->data.p_stsd->i_entry_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "stsz" sample size table: either one constant sample size, or one
 * 32-bit size per sample. */
static int MP4_ReadBox_stsz( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stsz_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsz );

    MP4_GET4BYTES( p_box->data.p_stsz->i_sample_size );
    MP4_GET4BYTES( p_box->data.p_stsz->i_sample_count );

    if( p_box->data.p_stsz->i_sample_size == 0 )
    {
        /* Per-sample sizes follow.  NOTE(review): i_sample_count comes
         * straight from the file and sizes this calloc; only the read
         * loop below is bounded by the actual payload (i_read). */
        p_box->data.p_stsz->i_entry_size =
            calloc( p_box->data.p_stsz->i_sample_count, sizeof(uint32_t) );
        if( unlikely( !p_box->data.p_stsz->i_entry_size ) )
            MP4_READBOX_EXIT( 0 );

        for( unsigned int i = 0; (i<p_box->data.p_stsz->i_sample_count)&&(i_read >= 4 ); i++ )
        {
            MP4_GET4BYTES( p_box->data.p_stsz->i_entry_size[i] );
        }
    }
    else
        p_box->data.p_stsz->i_entry_size = NULL;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"stsz\" sample-size %d sample-count %d",
             p_box->data.p_stsz->i_sample_size,
             p_box->data.p_stsz->i_sample_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_stsz( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_stsz->i_entry_size );
}

static void MP4_FreeBox_stsc( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_stsc->i_first_chunk );
    FREENULL( p_box->data.p_stsc->i_samples_per_chunk );
    FREENULL( p_box->data.p_stsc->i_sample_description_index );
}

/* "stsc" sample-to-chunk table: three parallel 32-bit arrays. */
static int MP4_ReadBox_stsc( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stsc_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsc );

    MP4_GET4BYTES( p_box->data.p_stsc->i_entry_count );

    /* NOTE(review): i_entry_count is file-controlled and sizes these
     * allocations; only the read loop is bounded by i_read. */
    p_box->data.p_stsc->i_first_chunk =
        calloc( p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) );
    p_box->data.p_stsc->i_samples_per_chunk = calloc(
        p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) );
    p_box->data.p_stsc->i_sample_description_index =
        calloc( p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) );
    if( unlikely( p_box->data.p_stsc->i_first_chunk == NULL
     || p_box->data.p_stsc->i_samples_per_chunk == NULL
     || p_box->data.p_stsc->i_sample_description_index == NULL ) )
    {
        MP4_READBOX_EXIT( 0 );
    }

    /* Each entry is 12 bytes; stop when the payload runs out.
     * NOTE(review): i_entry_count is not clamped on early exit, so
     * trailing entries remain zero-filled from calloc. */
    for( unsigned int i = 0; (i < p_box->data.p_stsc->i_entry_count )&&( i_read >= 12 );i++ )
    {
        MP4_GET4BYTES( p_box->data.p_stsc->i_first_chunk[i] );
        MP4_GET4BYTES( p_box->data.p_stsc->i_samples_per_chunk[i] );
        MP4_GET4BYTES( p_box->data.p_stsc->i_sample_description_index[i] );
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"stsc\" entry-count %d",
             p_box->data.p_stsc->i_entry_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Chunk offset table: 32-bit offsets ("stco") or 64-bit ("co64"); both
 * are stored widened into the 64-bit p_co64 structure. */
static int MP4_ReadBox_stco_co64( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_co64_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_co64 );

    MP4_GET4BYTES( p_box->data.p_co64->i_entry_count );

    p_box->data.p_co64->i_chunk_offset =
        calloc( p_box->data.p_co64->i_entry_count, sizeof(uint64_t) );
    if( p_box->data.p_co64->i_chunk_offset == NULL )
        MP4_READBOX_EXIT( 0 );

    for( unsigned int i = 0; i < p_box->data.p_co64->i_entry_count; i++ )
    {
        if( p_box->i_type == ATOM_stco )
        {
            if( i_read < 4 )
            {
                break;
            }
            MP4_GET4BYTES( p_box->data.p_co64->i_chunk_offset[i] );
        }
        else
        {
            if( i_read < 8 )
            {
                break;
            }
            MP4_GET8BYTES( p_box->data.p_co64->i_chunk_offset[i] );
        }
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"co64\" entry-count %d",
             p_box->data.p_co64->i_entry_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_stco_co64( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_co64->i_chunk_offset );
}

/* "stss" sync (key-frame) sample table. */
static int MP4_ReadBox_stss( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stss_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stss );

    MP4_GET4BYTES( p_box->data.p_stss->i_entry_count );

    p_box->data.p_stss->i_sample_number = calloc( p_box->data.p_stss->i_entry_count,
sizeof(uint32_t) ); if( unlikely( p_box->data.p_stss->i_sample_number == NULL ) ) MP4_READBOX_EXIT( 0 ); unsigned int i; for( i = 0; (i < p_box->data.p_stss->i_entry_count )&&( i_read >= 4 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stss->i_sample_number[i] ); /* XXX in libmp4 sample begin at 0 */ p_box->data.p_stss->i_sample_number[i]--; } if ( i < p_box->data.p_stss->i_entry_count ) p_box->data.p_stss->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stss\" entry-count %d", p_box->data.p_stss->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stss( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stss->i_sample_number ); } static void MP4_FreeBox_stsh( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stsh->i_shadowed_sample_number ); FREENULL( p_box->data.p_stsh->i_sync_sample_number ); } static int MP4_ReadBox_stsh( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stsh_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stsh ); MP4_GET4BYTES( p_box->data.p_stsh->i_entry_count ); p_box->data.p_stsh->i_shadowed_sample_number = calloc( p_box->data.p_stsh->i_entry_count, sizeof(uint32_t) ); p_box->data.p_stsh->i_sync_sample_number = calloc( p_box->data.p_stsh->i_entry_count, sizeof(uint32_t) ); if( p_box->data.p_stsh->i_shadowed_sample_number == NULL || p_box->data.p_stsh->i_sync_sample_number == NULL ) { MP4_READBOX_EXIT( 0 ); } unsigned i; for( i = 0; (i < p_box->data.p_stss->i_entry_count )&&( i_read >= 8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stsh->i_shadowed_sample_number[i] ); MP4_GET4BYTES( p_box->data.p_stsh->i_sync_sample_number[i] ); } if ( i < p_box->data.p_stss->i_entry_count ) p_box->data.p_stss->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stsh\" entry-count %d", p_box->data.p_stsh->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_stdp( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stdp_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stdp ); 
p_box->data.p_stdp->i_priority = calloc( i_read / 2, sizeof(uint16_t) ); if( unlikely( !p_box->data.p_stdp->i_priority ) ) MP4_READBOX_EXIT( 0 ); for( unsigned i = 0; i < i_read / 2 ; i++ ) { MP4_GET2BYTES( p_box->data.p_stdp->i_priority[i] ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stdp\" entry-count %"PRId64, i_read / 2 ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stdp( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stdp->i_priority ); } static void MP4_FreeBox_padb( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_padb->i_reserved1 ); FREENULL( p_box->data.p_padb->i_pad2 ); FREENULL( p_box->data.p_padb->i_reserved2 ); FREENULL( p_box->data.p_padb->i_pad1 ); } static int MP4_ReadBox_padb( stream_t *p_stream, MP4_Box_t *p_box ) { uint32_t count; MP4_READBOX_ENTER( MP4_Box_data_padb_t ); MP4_GETVERSIONFLAGS( p_box->data.p_padb ); MP4_GET4BYTES( p_box->data.p_padb->i_sample_count ); count = (p_box->data.p_padb->i_sample_count + 1) / 2; p_box->data.p_padb->i_reserved1 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_pad2 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_reserved2 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_pad1 = calloc( count, sizeof(uint16_t) ); if( p_box->data.p_padb->i_reserved1 == NULL || p_box->data.p_padb->i_pad2 == NULL || p_box->data.p_padb->i_reserved2 == NULL || p_box->data.p_padb->i_pad1 == NULL ) { MP4_READBOX_EXIT( 0 ); } for( unsigned int i = 0; i < i_read / 2 ; i++ ) { if( i >= count ) { MP4_READBOX_EXIT( 0 ); } p_box->data.p_padb->i_reserved1[i] = ( (*p_peek) >> 7 )&0x01; p_box->data.p_padb->i_pad2[i] = ( (*p_peek) >> 4 )&0x07; p_box->data.p_padb->i_reserved1[i] = ( (*p_peek) >> 3 )&0x01; p_box->data.p_padb->i_pad1[i] = ( (*p_peek) )&0x07; p_peek += 1; i_read -= 1; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stdp\" entry-count %"PRId64, i_read / 2 ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_elst( MP4_Box_t *p_box ) { FREENULL( 
              p_box->data.p_elst->i_segment_duration );
    FREENULL( p_box->data.p_elst->i_media_time );
    FREENULL( p_box->data.p_elst->i_media_rate_integer );
    FREENULL( p_box->data.p_elst->i_media_rate_fraction );
}

/* "elst" edit list: per-entry segment duration, media time and rate. */
static int MP4_ReadBox_elst( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_elst_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_elst );

    MP4_GET4BYTES( p_box->data.p_elst->i_entry_count );

    p_box->data.p_elst->i_segment_duration =
        calloc( p_box->data.p_elst->i_entry_count, sizeof(uint64_t) );
    p_box->data.p_elst->i_media_time =
        calloc( p_box->data.p_elst->i_entry_count, sizeof(int64_t) );
    p_box->data.p_elst->i_media_rate_integer =
        calloc( p_box->data.p_elst->i_entry_count, sizeof(uint16_t) );
    p_box->data.p_elst->i_media_rate_fraction =
        calloc( p_box->data.p_elst->i_entry_count, sizeof(uint16_t) );
    if( p_box->data.p_elst->i_segment_duration == NULL
     || p_box->data.p_elst->i_media_time == NULL
     || p_box->data.p_elst->i_media_rate_integer == NULL
     || p_box->data.p_elst->i_media_rate_fraction == NULL )
    {
        MP4_READBOX_EXIT( 0 );
    }

    unsigned i;
    for( i = 0; i < p_box->data.p_elst->i_entry_count; i++ )
    {
        if( p_box->data.p_elst->i_version == 1 )
        {
            /* Version 1: 64-bit duration + media time, 20 bytes/entry. */
            if ( i_read < 20 )
                break;
            MP4_GET8BYTES( p_box->data.p_elst->i_segment_duration[i] );
            MP4_GET8BYTES( p_box->data.p_elst->i_media_time[i] );
        }
        else
        {
            /* Version 0: 32-bit fields, 12 bytes/entry; media time is
             * sign-extended from 32 bits. */
            if ( i_read < 12 )
                break;
            MP4_GET4BYTES( p_box->data.p_elst->i_segment_duration[i] );
            MP4_GET4BYTES( p_box->data.p_elst->i_media_time[i] );
            p_box->data.p_elst->i_media_time[i] = (int32_t)p_box->data.p_elst->i_media_time[i];
        }

        MP4_GET2BYTES( p_box->data.p_elst->i_media_rate_integer[i] );
        MP4_GET2BYTES( p_box->data.p_elst->i_media_rate_fraction[i] );
    }
    /* Clamp to the number of entries actually read. */
    if ( i < p_box->data.p_elst->i_entry_count )
        p_box->data.p_elst->i_entry_count = i;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"elst\" entry-count %lu",
             (unsigned long)p_box->data.p_elst->i_entry_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "cprt" copyright notice with packed QuickTime language code. */
static int MP4_ReadBox_cprt( stream_t *p_stream, MP4_Box_t *p_box )
{
    uint16_t i_language;
    bool b_mac;

    MP4_READBOX_ENTER( MP4_Box_data_cprt_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_cprt );

    /* Decode the packed ISO-639 / Macintosh language code. */
    MP4_GET2BYTES( i_language );
    decodeQtLanguageCode( i_language, p_box->data.p_cprt->rgs_language, &b_mac );

    MP4_GETSTRINGZ( p_box->data.p_cprt->psz_notice );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"cprt\" language %3.3s notice %s",
             p_box->data.p_cprt->rgs_language,
             p_box->data.p_cprt->psz_notice );
#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_cprt( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_cprt->psz_notice );
}

/* "dcom": compression algorithm fourcc for a compressed moov. */
static int MP4_ReadBox_dcom( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_dcom_t );

    MP4_GETFOURCC( p_box->data.p_dcom->i_algorithm );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"dcom\" compression algorithm : %4.4s",
             (char*)&p_box->data.p_dcom->i_algorithm );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "cmvd": stores the raw compressed moov payload for later inflation. */
static int MP4_ReadBox_cmvd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_cmvd_t );

    MP4_GET4BYTES( p_box->data.p_cmvd->i_uncompressed_size );

    p_box->data.p_cmvd->i_compressed_size = i_read;

    if( !( p_box->data.p_cmvd->p_data = malloc( i_read ) ) )
        MP4_READBOX_EXIT( 0 );

    /* now copy compressed data */
    memcpy( p_box->data.p_cmvd->p_data, p_peek,i_read);

    p_box->data.p_cmvd->b_compressed = 1;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"cmvd\" compressed data size %d",
             p_box->data.p_cmvd->i_compressed_size );
#endif

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_cmvd( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_cmvd->p_data );
}

/* "cmov" compressed movie box: reads the dcom/cmvd pair and inflates
 * the contained moov with zlib (when available). */
static int MP4_ReadBox_cmov( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_t *p_dcom;
    MP4_Box_t *p_cmvd;

#ifdef HAVE_ZLIB_H
    stream_t *p_stream_memory;
    z_stream z_data;
    uint8_t *p_data;
    int i_result;
#endif

    if( !( p_box->data.p_cmov = calloc(1, sizeof( MP4_Box_data_cmov_t ) ) ) )
        return 0;

    /* "cmov" only makes sense directly inside a moov/foov. */
    if( !p_box->p_father ||
        ( p_box->p_father->i_type != ATOM_moov &&
          p_box->p_father->i_type != ATOM_foov ) )
    {
        msg_Warn( p_stream, "Read box: \"cmov\" box alone" );
        return 1;
    }
    if( !MP4_ReadBoxContainer( p_stream, p_box ) )
    {
        return 0;
    }

    /* Both children and the compressed payload must be present. */
    if( ( p_dcom = MP4_BoxGet( p_box, "dcom" ) ) == NULL ||
        ( p_cmvd = MP4_BoxGet( p_box, "cmvd" ) ) == NULL ||
        p_cmvd->data.p_cmvd->p_data == NULL )
    {
        msg_Warn( p_stream, "read box: \"cmov\" incomplete" );
        return 0;
    }

    if( p_dcom->data.p_dcom->i_algorithm != ATOM_zlib )
    {
        msg_Dbg( p_stream, "read box: \"cmov\" compression algorithm : %4.4s "
                 "not supported", (char*)&p_dcom->data.p_dcom->i_algorithm );
        return 0;
    }

#ifndef HAVE_ZLIB_H
    msg_Dbg( p_stream, "read box: \"cmov\" zlib unsupported" );
    return 0;

#else
    /* decompress data */
    /* allocate a new buffer */
    /* NOTE(review): i_uncompressed_size is file-controlled, so a hostile
     * value leads to a very large malloc here. */
    if( !( p_data = malloc( p_cmvd->data.p_cmvd->i_uncompressed_size ) ) )
        return 0;
    /* init default structures */
    z_data.next_in   = p_cmvd->data.p_cmvd->p_data;
    z_data.avail_in  = p_cmvd->data.p_cmvd->i_compressed_size;
    z_data.next_out  = p_data;
    z_data.avail_out = p_cmvd->data.p_cmvd->i_uncompressed_size;
    z_data.zalloc    = (alloc_func)Z_NULL;
    z_data.zfree     = (free_func)Z_NULL;
    z_data.opaque    = (voidpf)Z_NULL;

    /* init zlib */
    if( inflateInit( &z_data ) != Z_OK )
    {
        msg_Err( p_stream, "read box: \"cmov\" error while uncompressing" );
        free( p_data );
        return 0;
    }

    /* uncompress */
    i_result = inflate( &z_data, Z_NO_FLUSH );
    if( i_result != Z_OK && i_result != Z_STREAM_END )
    {
        msg_Err( p_stream, "read box: \"cmov\" error while uncompressing" );
        free( p_data );
        return 0;
    }

    if( p_cmvd->data.p_cmvd->i_uncompressed_size != z_data.total_out )
    {
        msg_Warn( p_stream, "read box: \"cmov\" uncompressing data size "
                  "mismatch" );
    }
    p_cmvd->data.p_cmvd->i_uncompressed_size = z_data.total_out;

    /* close zlib */
    if( inflateEnd( &z_data ) != Z_OK )
    {
        msg_Warn( p_stream, "read box: \"cmov\" error while uncompressing "
                  "data (ignored)" );
    }

    /* Replace the compressed blob with the inflated moov bytes. */
    free( p_cmvd->data.p_cmvd->p_data );
    p_cmvd->data.p_cmvd->p_data = p_data;
    p_cmvd->data.p_cmvd->b_compressed = 0;

    msg_Dbg( p_stream, "read box: \"cmov\" box successfully uncompressed" );

    /* now create a memory stream */
    p_stream_memory = stream_MemoryNew(
        VLC_OBJECT(p_stream), p_cmvd->data.p_cmvd->p_data,
        p_cmvd->data.p_cmvd->i_uncompressed_size, true );

    /* and read uncompressd moov */
    p_box->data.p_cmov->p_moov = MP4_ReadBox( p_stream_memory, NULL );

    stream_Delete( p_stream_memory );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"cmov\" compressed movie header completed");
#endif

    return p_box->data.p_cmov->p_moov ? 1 : 0;

#endif /* HAVE_ZLIB_H */
}

/* "rdrf" reference-movie data reference (URL / alias string). */
static int MP4_ReadBox_rdrf( stream_t *p_stream, MP4_Box_t *p_box )
{
    uint32_t i_len;
    MP4_READBOX_ENTER( MP4_Box_data_rdrf_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_rdrf );
    MP4_GETFOURCC( p_box->data.p_rdrf->i_ref_type );
    MP4_GET4BYTES( i_len );
    i_len++; /* room for the terminating NUL */

    if( i_len > 0 )
    {
        p_box->data.p_rdrf->psz_ref = malloc( i_len );
        if( p_box->data.p_rdrf->psz_ref == NULL )
            MP4_READBOX_EXIT( 0 );
        i_len--;

        for( unsigned i = 0; i < i_len; i++ )
        {
            MP4_GET1BYTE( p_box->data.p_rdrf->psz_ref[i] );
        }
        p_box->data.p_rdrf->psz_ref[i_len] = '\0';
    }
    else
    {
        p_box->data.p_rdrf->psz_ref = NULL;
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"rdrf\" type:%4.4s ref %s",
             (char*)&p_box->data.p_rdrf->i_ref_type,
             p_box->data.p_rdrf->psz_ref );
#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_rdrf( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_rdrf->psz_ref );
}

/* "rmdr" reference movie: minimum data rate. */
static int MP4_ReadBox_rmdr( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_rmdr_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_rmdr );

    MP4_GET4BYTES( p_box->data.p_rmdr->i_rate );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"rmdr\" rate:%d",
             p_box->data.p_rmdr->i_rate );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "rmqu" reference movie: quality. */
static int MP4_ReadBox_rmqu( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_rmqu_t );

    MP4_GET4BYTES( p_box->data.p_rmqu->i_quality );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"rmqu\" quality:%d",
             p_box->data.p_rmqu->i_quality );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "rmvc" reference movie: Mac Gestalt version check. */
static int MP4_ReadBox_rmvc( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_rmvc_t );
    MP4_GETVERSIONFLAGS( p_box->data.p_rmvc );

    MP4_GETFOURCC( p_box->data.p_rmvc->i_gestaltType );
    MP4_GET4BYTES( p_box->data.p_rmvc->i_val1 );
    MP4_GET4BYTES( p_box->data.p_rmvc->i_val2 );
    MP4_GET2BYTES( p_box->data.p_rmvc->i_checkType );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"rmvc\" gestaltType:%4.4s val1:0x%x val2:0x%x checkType:0x%x",
             (char*)&p_box->data.p_rmvc->i_gestaltType,
             p_box->data.p_rmvc->i_val1,p_box->data.p_rmvc->i_val2,
             p_box->data.p_rmvc->i_checkType );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "frma" original format fourcc (found inside protected tracks). */
static int MP4_ReadBox_frma( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_frma_t );

    MP4_GETFOURCC( p_box->data.p_frma->i_type );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"frma\" i_type:%4.4s",
             (char *)&p_box->data.p_frma->i_type );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "skcr" scrambling key counters. */
static int MP4_ReadBox_skcr( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_skcr_t );

    MP4_GET4BYTES( p_box->data.p_skcr->i_init );
    MP4_GET4BYTES( p_box->data.p_skcr->i_encr );
    MP4_GET4BYTES( p_box->data.p_skcr->i_decr );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"skcr\" i_init:%d i_encr:%d i_decr:%d",
             p_box->data.p_skcr->i_init,
             p_box->data.p_skcr->i_encr,
             p_box->data.p_skcr->i_decr );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* "drms" and related DRM boxes: acknowledged but not decoded. */
static int MP4_ReadBox_drms( stream_t *p_stream, MP4_Box_t *p_box )
{
    VLC_UNUSED(p_box);

    /* ATOMs 'user', 'key', 'iviv', and 'priv' will be skipped,
     * so unless data decrypt itself by magic, there will be no playback,
     * but we never know... */
    msg_Warn( p_stream, "DRM protected streams are not supported."
              );
    return 1;
}

/* Generic NUL-terminated string payload: copies the whole box body
 * (i_size minus the 8-byte size+name header) as text.
 * NOTE(review): assumes i_size >= 8, which holds for any box whose
 * header was parsed — confirm the caller guarantees it. */
static int MP4_ReadBox_String( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_string_t );

    p_box->data.p_string->psz_text = malloc( p_box->i_size + 1 - 8 ); /* +\0, -name, -size */
    if( p_box->data.p_string->psz_text == NULL )
        MP4_READBOX_EXIT( 0 );

    memcpy( p_box->data.p_string->psz_text, p_peek, p_box->i_size - 8 );
    p_box->data.p_string->psz_text[p_box->i_size - 8] = '\0';

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"%4.4s\" text=`%s'", (char *) & p_box->i_type,
             p_box->data.p_string->psz_text );
#endif

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_String( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_string->psz_text );
}

/* Generic opaque binary payload: copy the remaining box body verbatim.
 * An allocation failure is tolerated (blob just stays empty). */
static int MP4_ReadBox_Binary( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_binary_t );
    i_read = __MIN( i_read, UINT32_MAX );
    if ( i_read > 0 )
    {
        p_box->data.p_binary->p_blob = malloc( i_read );
        if ( p_box->data.p_binary->p_blob )
        {
            memcpy( p_box->data.p_binary->p_blob, p_peek, i_read );
            p_box->data.p_binary->i_blob = i_read;
        }
    }
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_Binary( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_binary->p_blob );
    p_box->data.p_binary->i_blob = 0;
}

/* iTunes-style "data" atom: 1-byte type class (only 0 accepted),
 * 3-byte well-known type, 2+2 byte locale, then the raw value. */
static int MP4_ReadBox_data( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_data_t );
    MP4_Box_data_data_t *p_data = p_box->data.p_data;

    if ( i_read < 8 || i_read - 8 > UINT32_MAX )
        MP4_READBOX_EXIT( 0 );

    uint8_t i_type;
    MP4_GET1BYTE( i_type );
    if ( i_type != 0 )
    {
#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "skipping unknown 'data' atom with type %"PRIu8, i_type );
#endif
        MP4_READBOX_EXIT( 0 );
    }

    MP4_GET3BYTES( p_data->e_wellknowntype );
    MP4_GET2BYTES( p_data->locale.i_country );
    MP4_GET2BYTES( p_data->locale.i_language );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read 'data' atom: knowntype=%"PRIu32", country=%"PRIu16" lang=%"PRIu16
             ", size %"PRId64" bytes", p_data->e_wellknowntype,
             p_data->locale.i_country, p_data->locale.i_language, i_read );
#endif
    p_box->data.p_data->p_blob = malloc( i_read );
    if ( !p_box->data.p_data->p_blob )
        MP4_READBOX_EXIT( 0 );

    p_box->data.p_data->i_blob = i_read;
    memcpy( p_box->data.p_data->p_blob, p_peek, i_read);

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_data( MP4_Box_t *p_box )
{
    free( p_box->data.p_data->p_blob );
}

/* Metadata item container: peek/skip the 8 extra header bytes, then
 * read only "data" children. */
static int MP4_ReadBox_Metadata( stream_t *p_stream, MP4_Box_t *p_box )
{
    const uint8_t *p_peek;
    if ( stream_Peek( p_stream, &p_peek, 16 ) < 16 )
        return 0;
    if ( stream_Read( p_stream, NULL, 8 ) < 8 )
        return 0;
    return MP4_ReadBoxContainerChildren( p_stream, p_box, ATOM_data );
}

/* Chapter support */
static void MP4_FreeBox_chpl( MP4_Box_t *p_box )
{
    MP4_Box_data_chpl_t *p_chpl = p_box->data.p_chpl;
    for( unsigned i = 0; i < p_chpl->i_chapter; i++ )
        free( p_chpl->chapter[i].psz_name );
}

/* Nero "chpl" chapter list: an 8-byte start time and a Pascal-style
 * name per entry; entries are bubble-sorted by start time afterwards. */
static int MP4_ReadBox_chpl( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_chpl_t *p_chpl;
    uint32_t i_dummy;
    VLC_UNUSED(i_dummy);
    int i;
    MP4_READBOX_ENTER( MP4_Box_data_chpl_t );

    p_chpl = p_box->data.p_chpl;

    MP4_GETVERSIONFLAGS( p_chpl );

    if ( i_read < 5 || p_chpl->i_version != 0x1 )
        MP4_READBOX_EXIT( 0 );

    MP4_GET4BYTES( i_dummy );

    /* One byte, so at most 255 chapters. */
    MP4_GET1BYTE( p_chpl->i_chapter );

    for( i = 0; i < p_chpl->i_chapter; i++ )
    {
        uint64_t i_start;
        uint8_t i_len;
        int i_copy;
        if ( i_read < 9 )
            break;
        MP4_GET8BYTES( i_start );
        MP4_GET1BYTE( i_len );

        p_chpl->chapter[i].psz_name = malloc( i_len + 1 );
        if( !p_chpl->chapter[i].psz_name )
            MP4_READBOX_EXIT( 0 );

        /* Never copy past the remaining payload. */
        i_copy = __MIN( i_len, i_read );
        if( i_copy > 0 )
            memcpy( p_chpl->chapter[i].psz_name, p_peek, i_copy );
        p_chpl->chapter[i].psz_name[i_copy] = '\0';
        p_chpl->chapter[i].i_start = i_start;

        p_peek += i_copy;
        i_read -= i_copy;
    }
    /* Clamp the chapter count if the loop exited early. */
    if ( i != p_chpl->i_chapter )
        p_chpl->i_chapter = i;

    /* Bubble sort by increasing start date */
    do
    {
        for( i = 0; i < p_chpl->i_chapter - 1; i++ )
        {
            if( p_chpl->chapter[i].i_start > p_chpl->chapter[i+1].i_start )
            {
                char *psz = p_chpl->chapter[i+1].psz_name;
                int64_t i64 = p_chpl->chapter[i+1].i_start;
                /* Swap entries i and i+1, then restart the pass. */
                p_chpl->chapter[i+1].psz_name = p_chpl->chapter[i].psz_name;
                p_chpl->chapter[i+1].i_start = p_chpl->chapter[i].i_start;
                p_chpl->chapter[i].psz_name = psz;
                p_chpl->chapter[i].i_start = i64;

                i = -1;
                break;
            }
        }
    } while( i == -1 );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"chpl\" %d chapters",
             p_chpl->i_chapter );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Generic "tref" track-reference child: an array of 32-bit track IDs
 * filling the whole box body. */
static int MP4_ReadBox_tref_generic( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_tref_generic_t );

    p_box->data.p_tref_generic->i_track_ID = NULL;
    p_box->data.p_tref_generic->i_entry_count = i_read / sizeof(uint32_t);
    if( p_box->data.p_tref_generic->i_entry_count > 0 )
        p_box->data.p_tref_generic->i_track_ID =
            calloc( p_box->data.p_tref_generic->i_entry_count, sizeof(uint32_t) );
    if( p_box->data.p_tref_generic->i_track_ID == NULL )
        MP4_READBOX_EXIT( 0 );

    for( unsigned i = 0; i < p_box->data.p_tref_generic->i_entry_count; i++ )
    {
        MP4_GET4BYTES( p_box->data.p_tref_generic->i_track_ID[i] );
    }
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"chap\" %d references",
             p_box->data.p_tref_generic->i_entry_count );
#endif

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_tref_generic( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_tref_generic->i_track_ID );
}

/* QuickTime metadata "keys" table: per entry a 4-byte size, a fourcc
 * namespace, then the key string. */
static int MP4_ReadBox_keys( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_keys_t );

    if ( i_read < 8 )
        MP4_READBOX_EXIT( 0 );

    uint32_t i_count;
    MP4_GET4BYTES( i_count ); /* reserved + flags */
    if ( i_count != 0 )
        MP4_READBOX_EXIT( 0 );

    MP4_GET4BYTES( i_count );
    p_box->data.p_keys->p_entries = calloc( i_count, sizeof(*p_box->data.p_keys->p_entries) );
    if ( !p_box->data.p_keys->p_entries )
        MP4_READBOX_EXIT( 0 );
    p_box->data.p_keys->i_entry_count = i_count;

    uint32_t i=0;
    for( ; i < i_count; i++ )
    {
        if ( i_read < 8 )
            break;
        uint32_t i_keysize;
        MP4_GET4BYTES( i_keysize );
        /* An entry must hold its own 8-byte header, and its remainder
         * must fit inside the unread payload. */
        if ( (i_keysize < 8) || (i_keysize - 4 > i_read) )
            break;
        MP4_GETFOURCC( p_box->data.p_keys->p_entries[i].i_namespace );
        i_keysize -= 8;
        p_box->data.p_keys->p_entries[i].psz_value = malloc( i_keysize + 1 );
        if ( !p_box->data.p_keys->p_entries[i].psz_value )
            break;
        memcpy( p_box->data.p_keys->p_entries[i].psz_value, p_peek, i_keysize );
        p_box->data.p_keys->p_entries[i].psz_value[i_keysize] = 0;
        p_peek += i_keysize;
        i_read -= i_keysize;
#ifdef MP4_ULTRA_VERBOSE
        msg_Dbg( p_stream, "read box: \"keys\": %u '%s'", i + 1,
                 p_box->data.p_keys->p_entries[i].psz_value );
#endif
    }
    /* Clamp the entry count if the loop exited early. */
    if ( i < i_count )
        p_box->data.p_keys->i_entry_count = i;

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_keys( MP4_Box_t *p_box )
{
    for( uint32_t i=0; i<p_box->data.p_keys->i_entry_count; i++ )
        free( p_box->data.p_keys->p_entries[i].psz_value );
    free( p_box->data.p_keys->p_entries );
}

/* "meta": container that must carry a valid "hdlr" (mdta or mdir)
 * before its children are parsed; inside "udta" it additionally starts
 * with a 4-byte version/flags field that must be zero. */
static int MP4_ReadBox_meta( stream_t *p_stream, MP4_Box_t *p_box )
{
    uint8_t meta_data[8];
    int i_actually_read;

    // skip over box header
    i_actually_read = stream_Read( p_stream, meta_data, 8 );
    if( i_actually_read < 8 )
        return 0;

    if ( p_box->p_father && p_box->p_father->i_type == ATOM_udta ) /* itunes udta/meta */
    {
        /* meta content starts with a 4 byte version/flags value (should be 0) */
        i_actually_read = stream_Read( p_stream, meta_data, 4 );
        if( i_actually_read < 4 || memcmp( meta_data, "\0\0\0", 4 ) )
            return 0;
    }

    if ( !MP4_ReadBoxContainerChildren( p_stream, p_box, ATOM_hdlr ) )
        return 0;

    /* Mandatory */
    const MP4_Box_t *p_hdlr = MP4_BoxGet( p_box, "hdlr" );
    if ( !p_hdlr || !BOXDATA(p_hdlr) ||
         ( BOXDATA(p_hdlr)->i_handler_type != HANDLER_mdta &&
           BOXDATA(p_hdlr)->i_handler_type != HANDLER_mdir ) ||
         BOXDATA(p_hdlr)->i_version != 0 )
        return 0;

    /* then it behaves like a container */
    return MP4_ReadBoxContainerRaw( p_stream, p_box );
}

/* "iods" initial object descriptor: descriptor id plus the five
 * profile/level indications. */
static int MP4_ReadBox_iods( stream_t *p_stream, MP4_Box_t *p_box )
{
    char i_unused;
    VLC_UNUSED(i_unused);

    MP4_READBOX_ENTER( MP4_Box_data_iods_t );
    MP4_GETVERSIONFLAGS( p_box->data.p_iods );

    MP4_GET1BYTE( i_unused ); /* tag */
    MP4_GET1BYTE( i_unused ); /* length */

    MP4_GET2BYTES( p_box->data.p_iods->i_object_descriptor
                   ); /* 10bits, 6 other bits are used for other flags */
    MP4_GET1BYTE( p_box->data.p_iods->i_OD_profile_level );
    MP4_GET1BYTE( p_box->data.p_iods->i_scene_profile_level );
    MP4_GET1BYTE( p_box->data.p_iods->i_audio_profile_level );
    MP4_GET1BYTE( p_box->data.p_iods->i_visual_profile_level );
    MP4_GET1BYTE( p_box->data.p_iods->i_graphics_profile_level );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"iods\" objectDescriptorId: %i, OD: %i, scene: %i, audio: %i, "
             "visual: %i, graphics: %i",
             p_box->data.p_iods->i_object_descriptor >> 6,
             p_box->data.p_iods->i_OD_profile_level,
             p_box->data.p_iods->i_scene_profile_level,
             p_box->data.p_iods->i_audio_profile_level,
             p_box->data.p_iods->i_visual_profile_level,
             p_box->data.p_iods->i_graphics_profile_level );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* "pasp" pixel aspect ratio (hSpacing / vSpacing). */
static int MP4_ReadBox_pasp( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_pasp_t );

    MP4_GET4BYTES( p_box->data.p_pasp->i_horizontal_spacing );
    MP4_GET4BYTES( p_box->data.p_pasp->i_vertical_spacing );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"paps\" %dx%d",
             p_box->data.p_pasp->i_horizontal_spacing,
             p_box->data.p_pasp->i_vertical_spacing);
#endif

    MP4_READBOX_EXIT( 1 );
}

/* "mehd" movie extends header: fragmented duration, 64-bit in v1. */
static int MP4_ReadBox_mehd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_mehd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_mehd );
    if( p_box->data.p_mehd->i_version == 1 )
        MP4_GET8BYTES( p_box->data.p_mehd->i_fragment_duration );
    else /* version == 0 */
        MP4_GET4BYTES( p_box->data.p_mehd->i_fragment_duration );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"mehd\" frag dur. %"PRIu64"",
             p_box->data.p_mehd->i_fragment_duration );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* "trex" track extends: per-track defaults for movie fragments. */
static int MP4_ReadBox_trex( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_trex_t );
    MP4_GETVERSIONFLAGS( p_box->data.p_trex );

    MP4_GET4BYTES( p_box->data.p_trex->i_track_ID );
    MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_description_index );
    MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_duration );
    MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_size );
    MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_flags );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"trex\" trackID: %"PRIu32"",
             p_box->data.p_trex->i_track_ID );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* "sdtp" sample dependency flags: one byte per sample, the whole
 * remaining payload. */
static int MP4_ReadBox_sdtp( stream_t *p_stream, MP4_Box_t *p_box )
{
    uint32_t i_sample_count;
    MP4_READBOX_ENTER( MP4_Box_data_sdtp_t );
    MP4_Box_data_sdtp_t *p_sdtp = p_box->data.p_sdtp;

    MP4_GETVERSIONFLAGS( p_box->data.p_sdtp );
    i_sample_count = i_read;

    p_sdtp->p_sample_table = calloc( i_sample_count, 1 );

    if( !p_sdtp->p_sample_table )
        MP4_READBOX_EXIT( 0 );

    for( uint32_t i = 0; i < i_sample_count; i++ )
        MP4_GET1BYTE( p_sdtp->p_sample_table[i] );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "i_sample_count is %"PRIu32"", i_sample_count );
    if ( i_sample_count > 3 )
        msg_Dbg( p_stream,
                 "read box: \"sdtp\" head: %"PRIx8" %"PRIx8" %"PRIx8" %"PRIx8"",
                 p_sdtp->p_sample_table[0],
                 p_sdtp->p_sample_table[1],
                 p_sdtp->p_sample_table[2],
                 p_sdtp->p_sample_table[3] );
#endif

    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_sdtp( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sdtp->p_sample_table );
}

/* "tsel" track selection: only the switch group is kept. */
static int MP4_ReadBox_tsel( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_tsel_t );
    uint32_t i_version;
    MP4_GET4BYTES( i_version );
    if ( i_version != 0 || i_read < 4 )
        MP4_READBOX_EXIT( 0 );
    MP4_GET4BYTES( p_box->data.p_tsel->i_switch_group );
    /* ignore list of attributes as es are present before switch */
    MP4_READBOX_EXIT( 1 );
}

/* "mfro" movie fragment random access offset (size of the mfra box). */
static int MP4_ReadBox_mfro( stream_t *p_stream,
                             MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_mfro_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_mfro );
    MP4_GET4BYTES( p_box->data.p_mfro->i_size );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"mfro\" size: %"PRIu32"",
             p_box->data.p_mfro->i_size);
#endif

    MP4_READBOX_EXIT( 1 );
}

/* "tfra" track fragment random access table.  The traf/trun/sample
 * numbers use a per-file variable width (1, 2, 3 or 4 bytes) encoded
 * in i_lengths; 3-byte values are stored in 4-byte slots. */
static int MP4_ReadBox_tfra( stream_t *p_stream, MP4_Box_t *p_box )
{
/* Read one variable-width number into the packed array at entry i.
 * NOTE(review): the multi-byte cases go through uint16_t / uint32_t
 * pointer casts into byte-packed arrays — relies on the element sizes
 * chosen for the callocs below; confirm alignment on strict targets. */
#define READ_VARIABLE_LENGTH(lengthvar, p_array) switch (lengthvar)\
{\
    case 0:\
        MP4_GET1BYTE( p_array[i] );\
        break;\
    case 1:\
        MP4_GET2BYTES( *((uint16_t *)&p_array[i*2]) );\
        break;\
    case 2:\
        MP4_GET3BYTES( *((uint32_t *)&p_array[i*4]) );\
        break;\
    case 3:\
        MP4_GET4BYTES( *((uint32_t *)&p_array[i*4]) );\
        break;\
    default:\
        goto error;\
}
#define FIX_VARIABLE_LENGTH(lengthvar) if ( lengthvar == 3 ) lengthvar = 4

    uint32_t i_number_of_entries;
    MP4_READBOX_ENTER( MP4_Box_data_tfra_t );
    MP4_Box_data_tfra_t *p_tfra = p_box->data.p_tfra;
    MP4_GETVERSIONFLAGS( p_box->data.p_tfra );
    if ( p_tfra->i_version > 1 )
        MP4_READBOX_EXIT( 0 );
    MP4_GET4BYTES( p_tfra->i_track_ID );
    uint32_t i_lengths = 0;
    MP4_GET4BYTES( i_lengths );
    MP4_GET4BYTES( p_tfra->i_number_of_entries );
    i_number_of_entries = p_tfra->i_number_of_entries;
    /* Unpack the three 2-bit field-width codes. */
    p_tfra->i_length_size_of_traf_num = i_lengths >> 4;
    p_tfra->i_length_size_of_trun_num = ( i_lengths & 0x0c ) >> 2;
    p_tfra->i_length_size_of_sample_num = i_lengths & 0x03;

    size_t size = 4 + 4*p_tfra->i_version; /* size in {4, 8} */
    p_tfra->p_time = calloc( i_number_of_entries, size );
    p_tfra->p_moof_offset = calloc( i_number_of_entries, size );

    size = 1 + p_tfra->i_length_size_of_traf_num; /* size in [|1, 4|] */
    if ( size == 3 ) size++;
    p_tfra->p_traf_number = calloc( i_number_of_entries, size );
    size = 1 + p_tfra->i_length_size_of_trun_num;
    if ( size == 3 ) size++;
    p_tfra->p_trun_number = calloc( i_number_of_entries, size );
    size = 1 + p_tfra->i_length_size_of_sample_num;
    if ( size == 3 ) size++;
    p_tfra->p_sample_number = calloc( i_number_of_entries, size );

    if( !p_tfra->p_time || !p_tfra->p_moof_offset ||
        !p_tfra->p_traf_number || !p_tfra->p_trun_number ||
        !p_tfra->p_sample_number )
        goto error;

    /* Bytes needed by the three variable-width numbers of one entry
     * (the fixed time/moof part is added at the checks below). */
    int i_fields_length = 3 + p_tfra->i_length_size_of_traf_num
            + p_tfra->i_length_size_of_trun_num
            + p_tfra->i_length_size_of_sample_num;

    uint32_t i;
    for( i = 0; i < i_number_of_entries; i++ )
    {
        if( p_tfra->i_version == 1 )
        {
            /* v1: 64-bit time and moof offset. */
            if ( i_read < i_fields_length + 16 )
                break;
            MP4_GET8BYTES( *((uint64_t *)&p_tfra->p_time[i*2]) );
            MP4_GET8BYTES( *((uint64_t *)&p_tfra->p_moof_offset[i*2]) );
        }
        else
        {
            /* v0: 32-bit time and moof offset. */
            if ( i_read < i_fields_length + 8 )
                break;
            MP4_GET4BYTES( p_tfra->p_time[i] );
            MP4_GET4BYTES( p_tfra->p_moof_offset[i] );
        }

        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_traf_num, p_tfra->p_traf_number);
        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_trun_num, p_tfra->p_trun_number);
        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_sample_num, p_tfra->p_sample_number);
    }
    /* Clamp to the number of fully read entries. */
    if ( i < i_number_of_entries )
        i_number_of_entries = i;

    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_traf_num);
    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_trun_num);
    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_sample_num);

#ifdef MP4_ULTRA_VERBOSE
    for( i = 0; i < i_number_of_entries; i++ )
    {
        if( p_tfra->i_version == 0 )
        {
            msg_Dbg( p_stream, "tfra[%"PRIu32"] time[%"PRIu32"]: %"PRIu32", "
                               "moof_offset[%"PRIu32"]: %"PRIu32"",
                     p_tfra->i_track_ID,
                     i, p_tfra->p_time[i],
                     i, p_tfra->p_moof_offset[i] );
        }
        else
        {
            msg_Dbg( p_stream, "tfra[%"PRIu32"] time[%"PRIu32"]: %"PRIu64", "
                               "moof_offset[%"PRIu32"]: %"PRIu64"",
                     p_tfra->i_track_ID,
                     i, ((uint64_t *)(p_tfra->p_time))[i],
                     i, ((uint64_t *)(p_tfra->p_moof_offset))[i] );
        }
    }
#endif
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "tfra[%"PRIu32"] %"PRIu32" entries",
             p_tfra->i_track_ID, i_number_of_entries );
#endif

    MP4_READBOX_EXIT( 1 );
error:
    MP4_READBOX_EXIT( 0 );

#undef READ_VARIABLE_LENGTH
#undef FIX_VARIABLE_LENGTH
}

static void MP4_FreeBox_tfra( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_tfra->p_time );
    FREENULL( p_box->data.p_tfra->p_moof_offset );
    FREENULL( p_box->data.p_tfra->p_traf_number
); FREENULL( p_box->data.p_tfra->p_trun_number ); FREENULL( p_box->data.p_tfra->p_sample_number ); } static int MP4_ReadBox_pnot( stream_t *p_stream, MP4_Box_t *p_box ) { if ( p_box->i_size != 20 ) return 0; MP4_READBOX_ENTER( MP4_Box_data_pnot_t ); MP4_GET4BYTES( p_box->data.p_pnot->i_date ); uint16_t i_version; MP4_GET2BYTES( i_version ); if ( i_version != 0 ) MP4_READBOX_EXIT( 0 ); MP4_GETFOURCC( p_box->data.p_pnot->i_type ); MP4_GET2BYTES( p_box->data.p_pnot->i_index ); MP4_READBOX_EXIT( 1 ); } /* For generic */ static int MP4_ReadBox_default( stream_t *p_stream, MP4_Box_t *p_box ) { if( !p_box->p_father ) { goto unknown; } if( p_box->p_father->i_type == ATOM_stsd ) { MP4_Box_t *p_mdia = MP4_BoxGet( p_box, "../../../.." ); MP4_Box_t *p_hdlr; if( p_mdia == NULL || p_mdia->i_type != ATOM_mdia || (p_hdlr = MP4_BoxGet( p_mdia, "hdlr" )) == NULL ) { goto unknown; } switch( p_hdlr->data.p_hdlr->i_handler_type ) { case ATOM_soun: return MP4_ReadBox_sample_soun( p_stream, p_box ); case ATOM_vide: return MP4_ReadBox_sample_vide( p_stream, p_box ); case ATOM_text: return MP4_ReadBox_sample_text( p_stream, p_box ); case ATOM_tx3g: case ATOM_sbtl: return MP4_ReadBox_sample_tx3g( p_stream, p_box ); default: msg_Warn( p_stream, "unknown handler type in stsd (incompletely loaded)" ); return 1; } } unknown: if MP4_BOX_TYPE_ASCII() msg_Warn( p_stream, "unknown box type %4.4s (incompletely loaded)", (char*)&p_box->i_type ); else msg_Warn( p_stream, "unknown box type c%3.3s (incompletely loaded)", (char*)&p_box->i_type+1 ); p_box->e_flags |= BOX_FLAG_INCOMPLETE; return 1; } /**** ------------------------------------------------------------------- ****/ /**** "Higher level" Functions ****/ /**** ------------------------------------------------------------------- ****/ static const struct { uint32_t i_type; int (*MP4_ReadBox_function )( stream_t *p_stream, MP4_Box_t *p_box ); void (*MP4_FreeBox_function )( MP4_Box_t *p_box ); uint32_t i_parent; /* set parent to restrict, 
 duplicating if needed; 0 for any */
} MP4_Box_Function [] =
{
    /* Containers */
    { ATOM_moov,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_foov,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_trak,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_trak,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_foov },
    { ATOM_mdia,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_moof,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_minf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_mdia },
    { ATOM_stbl,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_dinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_dinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_meta },
    { ATOM_edts,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_udta,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_nmhd,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_hnti,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_udta },
    { ATOM_rmra,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_rmda,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_rmra },
    { ATOM_tref,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_gmhd,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_stsd },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_mp4a }, /* some quicktime mp4a/wave/mp4a.. */
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_WMA2 }, /* flip4mac */
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_in24 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_in32 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_fl32 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_fl64 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_QDMC },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_QDM2 },
    { ATOM_ilst,    MP4_ReadBox_ilst,         MP4_FreeBox_Common, ATOM_meta },
    { ATOM_mvex,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_mvex,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_ftyp },

    /* specific box */
    { ATOM_ftyp,    MP4_ReadBox_ftyp,         MP4_FreeBox_ftyp, 0 },
    { ATOM_cmov,    MP4_ReadBox_cmov,         MP4_FreeBox_Common, 0 },
    { ATOM_mvhd,    MP4_ReadBox_mvhd,         MP4_FreeBox_Common, ATOM_moov },
    { ATOM_mvhd,    MP4_ReadBox_mvhd,         MP4_FreeBox_Common, ATOM_foov },
    { ATOM_tkhd,    MP4_ReadBox_tkhd,         MP4_FreeBox_Common, ATOM_trak },
    { ATOM_load,    MP4_ReadBox_load,         MP4_FreeBox_Common, ATOM_trak },
    { ATOM_mdhd,    MP4_ReadBox_mdhd,         MP4_FreeBox_Common, ATOM_mdia },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_mdia },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_meta },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_minf },
    { ATOM_vmhd,    MP4_ReadBox_vmhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_smhd,    MP4_ReadBox_smhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_hmhd,    MP4_ReadBox_hmhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_alis,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, ATOM_dref },
    { ATOM_url,     MP4_ReadBox_url,          MP4_FreeBox_url, 0 },
    { ATOM_urn,     MP4_ReadBox_urn,          MP4_FreeBox_urn, 0 },
    { ATOM_dref,    MP4_ReadBox_dref,         MP4_FreeBox_Common, 0 },
    { ATOM_stts,    MP4_ReadBox_stts,         MP4_FreeBox_stts, ATOM_stbl },
    { ATOM_ctts,    MP4_ReadBox_ctts,         MP4_FreeBox_ctts, ATOM_stbl },
    { ATOM_stsd,    MP4_ReadBox_stsd,         MP4_FreeBox_Common, ATOM_stbl },
    { ATOM_stsz,    MP4_ReadBox_stsz,         MP4_FreeBox_stsz, ATOM_stbl },
    { ATOM_stsc,    MP4_ReadBox_stsc,         MP4_FreeBox_stsc, ATOM_stbl },
    { ATOM_stco,    MP4_ReadBox_stco_co64,    MP4_FreeBox_stco_co64, ATOM_stbl },
    { ATOM_co64,    MP4_ReadBox_stco_co64,    MP4_FreeBox_stco_co64, ATOM_stbl },
    { ATOM_stss,    MP4_ReadBox_stss,         MP4_FreeBox_stss, ATOM_stbl },
    { ATOM_stsh,    MP4_ReadBox_stsh,         MP4_FreeBox_stsh, ATOM_stbl },
    { ATOM_stdp,    MP4_ReadBox_stdp,         MP4_FreeBox_stdp, 0 },
    { ATOM_padb,    MP4_ReadBox_padb,         MP4_FreeBox_padb, 0 },
    { ATOM_elst,    MP4_ReadBox_elst,         MP4_FreeBox_elst, ATOM_edts },
    { ATOM_cprt,    MP4_ReadBox_cprt,         MP4_FreeBox_cprt, 0 },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_wave }, /* mp4a in wave chunk */
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4a },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4v },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4s },
    { ATOM_dcom,    MP4_ReadBox_dcom,         MP4_FreeBox_Common, 0 },
    { ATOM_cmvd,    MP4_ReadBox_cmvd,         MP4_FreeBox_cmvd, 0 },
    { ATOM_avcC,    MP4_ReadBox_avcC,         MP4_FreeBox_avcC, ATOM_avc1 },
    { ATOM_hvcC,    MP4_ReadBox_hvcC,         MP4_FreeBox_hvcC, 0 },
    { ATOM_dac3,    MP4_ReadBox_dac3,         MP4_FreeBox_Common, 0 },
    { ATOM_dec3,    MP4_ReadBox_dec3,         MP4_FreeBox_Common, 0 },
    { ATOM_dvc1,    MP4_ReadBox_dvc1,         MP4_FreeBox_Common, 0 },
    { ATOM_enda,    MP4_ReadBox_enda,         MP4_FreeBox_Common, 0 },
    { ATOM_iods,    MP4_ReadBox_iods,         MP4_FreeBox_Common, 0 },
    { ATOM_pasp,    MP4_ReadBox_pasp,         MP4_FreeBox_Common, 0 },
    { ATOM_keys,    MP4_ReadBox_keys,         MP4_FreeBox_keys, ATOM_meta },

    /* Quicktime preview atoms, all at root */
    { ATOM_pnot,    MP4_ReadBox_pnot,         MP4_FreeBox_Common, 0 },
    { ATOM_pict,    MP4_ReadBox_Binary,       MP4_FreeBox_Binary, 0 },
    { ATOM_PICT,    MP4_ReadBox_Binary,       MP4_FreeBox_Binary, 0 },

    /* Nothing to do with this box */
    { ATOM_mdat,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_skip,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_free,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_wide,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_binm,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },

    /* Subtitles */
    { ATOM_tx3g,    MP4_ReadBox_sample_tx3g,  MP4_FreeBox_Common, 0 },
    //{ ATOM_text,    MP4_ReadBox_sample_text,  MP4_FreeBox_Common, 0 },

    /* for codecs */
    { ATOM_soun,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ac3,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_eac3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_lpcm,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms02,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms11,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms55,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM__mp3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_mp4a,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_twos,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_sowt,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_QDMC,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_QDM2,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ima4,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_IMA4,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_dvi,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_alaw,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ulaw,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_raw,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_MAC3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_MAC6,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_Qclp,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_samr,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_sawb,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_OggS,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_alac,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_WMA2,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd }, /* flip4mac */
    /* Sound extensions */
    { ATOM_chan,    MP4_ReadBox_stsdext_chan, MP4_FreeBox_stsdext_chan, 0 },
    { ATOM_WMA2,    MP4_ReadBox_WMA2,         MP4_FreeBox_WMA2, ATOM_wave }, /* flip4mac */

    { ATOM_drmi,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_vide,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mp4v,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_SVQ1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_SVQ3,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_ZyGo,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_DIVX,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_XVID,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_h263,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_s263,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_cvid,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IV1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3iv1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IV2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3iv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IVD,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3ivd,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3VID,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3vid,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mjpa,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mjpb,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_qdrw,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mp2v,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_hdv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_WMV3,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },

    { ATOM_mjqt,    MP4_ReadBox_default,      NULL, 0 }, /* found in mjpa/b */
    { ATOM_mjht,    MP4_ReadBox_default,      NULL, 0 },

    { ATOM_dvc,     MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dvp,     MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dv5n,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dv5p,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_VP31,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_vp31,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_h264,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_jpeg,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_avc1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },

    { ATOM_yv12,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, 0 },
    { ATOM_yuv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, 0 },

    { ATOM_strf,    MP4_ReadBox_strf,         MP4_FreeBox_strf, ATOM_WMV3 }, /* flip4mac */
    { ATOM_ASF ,    MP4_ReadBox_ASF,          MP4_FreeBox_Common, ATOM_WMV3 }, /* flip4mac */
    { ATOM_ASF ,    MP4_ReadBox_ASF,          MP4_FreeBox_Common, ATOM_wave }, /* flip4mac */

    { ATOM_mp4s,    MP4_ReadBox_sample_mp4s,  MP4_FreeBox_Common, ATOM_stsd },

    /* XXX there is 2 box where we could find this entry stbl and tref*/
    { ATOM_hint,    MP4_ReadBox_default,      MP4_FreeBox_Common, 0 },

    /* found in tref box */
    { ATOM_dpnd,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_ipir,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_mpod,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_chap,    MP4_ReadBox_tref_generic, MP4_FreeBox_tref_generic, 0 },

    /* found in hnti */
    { ATOM_rtp,     MP4_ReadBox_default,      NULL, 0 },

    /* found in rmra/rmda */
    { ATOM_rdrf,    MP4_ReadBox_rdrf,         MP4_FreeBox_rdrf  , ATOM_rmda },
    { ATOM_rmdr,    MP4_ReadBox_rmdr,         MP4_FreeBox_Common, ATOM_rmda },
    { ATOM_rmqu,    MP4_ReadBox_rmqu,         MP4_FreeBox_Common, ATOM_rmda },
    { ATOM_rmvc,    MP4_ReadBox_rmvc,         MP4_FreeBox_Common, ATOM_rmda },

    { ATOM_drms,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, 0 },
    { ATOM_sinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_schi,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_user,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_key,     MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_iviv,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_priv,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_frma,    MP4_ReadBox_frma,         MP4_FreeBox_Common, ATOM_sinf }, /* and rinf */
    { ATOM_frma,    MP4_ReadBox_frma,         MP4_FreeBox_Common, ATOM_wave }, /* flip4mac */
    { ATOM_skcr,    MP4_ReadBox_skcr,         MP4_FreeBox_Common, 0 },

    /* ilst meta tags */
    { ATOM_0xa9ART, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9alb, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9cmt, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9com, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9day, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9des, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9enc, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9gen, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9grp, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9lyr, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9nam, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9too, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9trk, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9wrt, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_aART,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_atID,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst }, /* iTunes */
    { ATOM_cnID,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst }, /* iTunes */
    { ATOM_covr,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_disk,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_flvr,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_gnre,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_rtng,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_trkn,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_xid_,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },

    /* udta */
    { ATOM_0x40PRM, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0x40PRQ, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ART, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9alb, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ard, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9arg, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9aut, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cak, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cmt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9con, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9com, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cpy, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9day, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9des, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dir, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dis, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dsa, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9fmt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9gen, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9grp, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9hst, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9inf, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9isr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lab, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lal, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lnt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lyr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mak, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mal, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mod, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9nam, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ope, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9phg, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9PRD, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9prd, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9prf, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9pub, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9req, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9sne, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9snm, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9sol, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9src, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9st3, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9swr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9thx, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9too, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9trk, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9url, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9wrn, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9xpd, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9xyz, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_chpl,    MP4_ReadBox_chpl,         MP4_FreeBox_chpl, ATOM_udta }, /* nero unlabeled chapters list */
    { ATOM_MCPS,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_name,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_vndr,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_SDLN,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },

    /* udta, non meta */
    { ATOM_tsel,    MP4_ReadBox_tsel,         MP4_FreeBox_Common, ATOM_udta },

    /* iTunes/Quicktime meta info */
    { ATOM_meta,    MP4_ReadBox_meta,         MP4_FreeBox_Common, 0 },
    { ATOM_data,    MP4_ReadBox_data,         MP4_FreeBox_data, 0 },

    /* found in smoothstreaming */
    { ATOM_traf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moof },
    { ATOM_mfra,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_mfhd,    MP4_ReadBox_mfhd,         MP4_FreeBox_Common, ATOM_moof },
    { ATOM_sidx,    MP4_ReadBox_sidx,         MP4_FreeBox_sidx, 0 },
    { ATOM_tfhd,    MP4_ReadBox_tfhd,         MP4_FreeBox_Common, ATOM_traf },
    { ATOM_trun,    MP4_ReadBox_trun,         MP4_FreeBox_trun, ATOM_traf },
    { ATOM_trex,    MP4_ReadBox_trex,         MP4_FreeBox_Common, ATOM_mvex },
    { ATOM_mehd,    MP4_ReadBox_mehd,         MP4_FreeBox_Common, ATOM_mvex },
    { ATOM_sdtp,    MP4_ReadBox_sdtp,         MP4_FreeBox_sdtp, 0 },
    { ATOM_tfra,    MP4_ReadBox_tfra,         MP4_FreeBox_tfra, ATOM_mfra },
    { ATOM_mfro,    MP4_ReadBox_mfro,         MP4_FreeBox_Common, ATOM_mfra },
    { ATOM_uuid,    MP4_ReadBox_uuid,         MP4_FreeBox_uuid, 0 },

    /* Last entry */
    /* i_type == 0 acts as the catch-all terminator for the linear scan */
    { 0,            MP4_ReadBox_default,      NULL, 0 }
};

/*****************************************************************************
 * MP4_ReadBox : parse the actual box and the children
 *  XXX : Do not go to the next box
 *****************************************************************************/
static MP4_Box_t *MP4_ReadBox( stream_t *p_stream, MP4_Box_t *p_father )
{
    MP4_Box_t *p_box = calloc( 1, sizeof( MP4_Box_t ) ); /* Needed to ensure simple on error handler */
    unsigned int i_index;

    if(
 p_box == NULL )
        return NULL;

    if( !MP4_ReadBoxCommon( p_stream, p_box ) )
    {
        msg_Warn( p_stream, "cannot read one box" );
        free( p_box );
        return NULL;
    }
    if( !p_box->i_size )
    {
        msg_Dbg( p_stream, "found an empty box (null size)" );
        free( p_box );
        return NULL;
    }
    p_box->p_father = p_father;

    /* Now search function to call */
    /* linear scan; termination is guaranteed by the table's final
     * { 0, MP4_ReadBox_default, ... } catch-all entry */
    for( i_index = 0; ; i_index++ )
    {
        if ( MP4_Box_Function[i_index].i_parent &&
             p_box->p_father &&
             p_box->p_father->i_type != MP4_Box_Function[i_index].i_parent )
            continue;

        if( ( MP4_Box_Function[i_index].i_type == p_box->i_type )||
            ( MP4_Box_Function[i_index].i_type == 0 ) )
        {
            break;
        }
    }

    if( !(MP4_Box_Function[i_index].MP4_ReadBox_function)( p_stream, p_box ) )
    {
        /* reader failed: release the box and seek past it so parsing of the
         * following siblings can continue */
        off_t i_end = p_box->i_pos + p_box->i_size;
        MP4_BoxFree( p_stream, p_box );
        stream_Seek( p_stream, i_end ); /* Skip the failed box */
        return NULL;
    }
    p_box->pf_free = MP4_Box_Function[i_index].MP4_FreeBox_function;

    return p_box;
}

/*****************************************************************************
 * MP4_FreeBox : free memory after read with MP4_ReadBox and all
 * the children
 *****************************************************************************/
void MP4_BoxFree( stream_t *s, MP4_Box_t *p_box )
{
    MP4_Box_t    *p_child;

    if( !p_box )
        return; /* hehe */

    /* recursively free the children list first */
    for( p_child = p_box->p_first; p_child != NULL; )
    {
        MP4_Box_t *p_next;

        p_next = p_child->p_next;
        MP4_BoxFree( s, p_child );
        p_child = p_next;
    }

    /* Now search function to call */
    if( p_box->data.p_payload )
    {
        if (unlikely( p_box->pf_free == NULL ))
        {
            /* Should not happen */
            if MP4_BOX_TYPE_ASCII()
                msg_Warn( s,
                        "cannot free box %4.4s, type unknown",
                        (char*)&p_box->i_type );
            else
                msg_Warn( s,
                        "cannot free box c%3.3s, type unknown",
                        (char*)&p_box->i_type+1 );
        }
        else
        {
            /* type-specific destructor releases nested allocations, then the
             * payload union itself is freed */
            p_box->pf_free( p_box );
        }
        free( p_box->data.p_payload );
    }
    free( p_box );
}

/* SmooBox is a very simple MP4 box, VLC specific, used only for the stream_filter to
 * send information to the demux.
 SmooBox is actually a simplified moov box (we wanted
 * to avoid the hassle of building a moov box at the stream_filter level) */
/* Read one box from the stream and accept it only if it is the VLC-specific
 * 'uuid' SmooBox; the result is wrapped in a virtual ATOM_root container
 * that the caller owns. */
MP4_Box_t *MP4_BoxGetSmooBox( stream_t *s )
{
    /* p_chunk is a virtual root container for the smoo box */
    MP4_Box_t *p_chunk;
    MP4_Box_t *p_smoo;

    p_chunk = calloc( 1, sizeof( MP4_Box_t ) );
    if( unlikely( p_chunk == NULL ) )
        return NULL;

    p_chunk->i_type = ATOM_root;
    p_chunk->i_shortsize = 1;

    p_smoo = MP4_ReadBox( s, p_chunk );
    if( !p_smoo || p_smoo->i_type != ATOM_uuid || CmpUUID( &p_smoo->i_uuid, &SmooBoxUUID ) )
    {
        msg_Warn( s, "no smoo box found!");
        goto error;
    }

    p_chunk->p_first = p_smoo;
    p_chunk->p_last = p_smoo;

    return p_chunk;

error:
    /* NOTE(review): when p_smoo was read but is not a SmooBox it is not
     * freed here — looks like a leak, verify against MP4_BoxFree ownership */
    free( p_chunk );
    return NULL;
}

/* Read the next fragment (a run of 'moof' children) into a virtual root
 * box.  A leading SmooBox or 'ftyp' box redirects to the dedicated
 * readers instead. */
MP4_Box_t *MP4_BoxGetNextChunk( stream_t *s )
{
    /* p_chunk is a virtual root container for the moof and mdat boxes */
    MP4_Box_t *p_chunk;
    MP4_Box_t *p_tmp_box = NULL;

    p_tmp_box = calloc( 1, sizeof( MP4_Box_t ) );
    if( unlikely( p_tmp_box == NULL ) )
        return NULL;

    /* We might get a ftyp box or a SmooBox */
    /* NOTE(review): return value of MP4_ReadBoxCommon is not checked here;
     * on failure p_tmp_box->i_type is presumably zeroed — confirm */
    MP4_ReadBoxCommon( s, p_tmp_box );

    if( (p_tmp_box->i_type == ATOM_uuid && !CmpUUID( &p_tmp_box->i_uuid, &SmooBoxUUID )) )
    {
        free( p_tmp_box );
        return MP4_BoxGetSmooBox( s );
    }
    else if( p_tmp_box->i_type == ATOM_ftyp )
    {
        free( p_tmp_box );
        return MP4_BoxGetRoot( s );
    }
    free( p_tmp_box );

    p_chunk = calloc( 1, sizeof( MP4_Box_t ) );
    if( unlikely( p_chunk == NULL ) )
        return NULL;

    p_chunk->i_type = ATOM_root;
    p_chunk->i_shortsize = 1;

    MP4_ReadBoxContainerChildren( s, p_chunk, ATOM_moof );

    /* the virtual root's size is the sum of its children's sizes */
    p_tmp_box = p_chunk->p_first;
    while( p_tmp_box )
    {
        p_chunk->i_size += p_tmp_box->i_size;
        p_tmp_box = p_tmp_box->p_next;
    }

    return p_chunk;
}

/*****************************************************************************
 * MP4_BoxGetRoot : Parse the entire file, and create all boxes in memory
 *****************************************************************************
 *  The first box is a virtual box "root" and is the father for all first
 *  level boxes for the file, a sort of virtual contener
 *****************************************************************************/
MP4_Box_t *MP4_BoxGetRoot( stream_t *s )
{
    MP4_Box_t *p_root;
    stream_t *p_stream;
    int i_result;

    p_root = malloc( sizeof( MP4_Box_t ) );
    if( p_root == NULL )
        return NULL;

    /* malloc'd (not calloc'd): every member must be initialised here */
    p_root->i_pos = 0;
    p_root->i_type = ATOM_root;
    p_root->i_shortsize = 1;
    /* could be a DASH stream for exemple, 0 means unknown or infinite size */
    p_root->i_size = 0;
    CreateUUID( &p_root->i_uuid, p_root->i_type );

    p_root->data.p_payload = NULL;
    p_root->p_father = NULL;
    p_root->p_first  = NULL;
    p_root->p_last  = NULL;
    p_root->p_next   = NULL;

    p_stream = s;

    /* First get the moov */
    i_result = MP4_ReadBoxContainerChildren( p_stream, p_root, ATOM_moov );

    if( !i_result )
        goto error;
    /* If there is a mvex box, it means fragmented MP4, and we're done */
    else if( MP4_BoxCount( p_root, "moov/mvex" ) > 0 )
        return p_root;

    p_root->i_size = stream_Size( s );
    if( stream_Tell( s ) + 8 < stream_Size( s ) )
    {
        /* Get the rest of the file */
        i_result = MP4_ReadBoxContainerRaw( p_stream, p_root );

        if( !i_result )
            goto error;
    }

    MP4_Box_t *p_moov;
    MP4_Box_t *p_cmov;

    /* check if there is a cmov, if so replace
      compressed moov by  uncompressed one */
    if( ( ( p_moov = MP4_BoxGet( p_root, "moov" ) ) &&
          ( p_cmov = MP4_BoxGet( p_root, "moov/cmov" ) ) ) ||
        ( ( p_moov = MP4_BoxGet( p_root, "foov" ) ) &&
          ( p_cmov = MP4_BoxGet( p_root, "foov/cmov" ) ) ) )
    {
        /* rename the compressed moov as a box to skip */
        p_moov->i_type = ATOM_skip;

        /* get uncompressed p_moov */
        p_moov = p_cmov->data.p_cmov->p_moov;
        p_cmov->data.p_cmov->p_moov = NULL;

        /* make p_root father of this new moov */
        p_moov->p_father = p_root;

        /* insert this new moov box as first child of p_root */
        p_moov->p_next = p_root->p_first;
        p_root->p_first = p_moov;
    }

    return p_root;

error:
    /* NOTE(review): only the root node is freed; children already attached
     * by the container readers appear to be leaked — MP4_BoxFree( p_stream,
     * p_root ) would release the whole tree. Verify. */
    free( p_root );
    stream_Seek( p_stream, 0 );
    return NULL;
}

/* Recursive pretty-printer: emits one debug line per box, indented by the
 * nesting depth i_level. */
static void MP4_BoxDumpStructure_Internal( stream_t *s, MP4_Box_t *p_box,
                                           unsigned int i_level )
{
    MP4_Box_t *p_child;
    uint32_t i_displayedtype = p_box->i_type;
    /* non-printable first fourcc byte is rendered with a 'c' prefix */
    if( ! MP4_BOX_TYPE_ASCII() )
        ((char*)&i_displayedtype)[0] = 'c';

    if( !i_level )
    {
        msg_Dbg( s, "dumping root Box \"%4.4s\"",
                          (char*)&i_displayedtype );
    }
    else
    {
        char str[512];
        /* refuse depths whose indentation would not fit the fixed buffer */
        if( i_level >= (sizeof(str) - 1)/4 )
            return;

        memset( str, ' ', sizeof(str) );
        for( unsigned i = 0; i < i_level; i++ )
        {
            str[i*4] = '|';
        }

        snprintf( &str[i_level * 4], sizeof(str) - 4*i_level,
                  "+ %4.4s size %"PRIu64" offset %" PRIuMAX "%s",
                    (char*)&i_displayedtype, p_box->i_size,
                  (uintmax_t)p_box->i_pos,
                p_box->e_flags & BOX_FLAG_INCOMPLETE ? " (\?\?\?\?)" : "" );
        msg_Dbg( s, "%s", str );
    }
    p_child = p_box->p_first;
    while( p_child )
    {
        MP4_BoxDumpStructure_Internal( s, p_child, i_level + 1 );
        p_child = p_child->p_next;
    }
}

/* Public entry point: dump a whole box tree starting at depth 0. */
void MP4_BoxDumpStructure( stream_t *s, MP4_Box_t *p_box )
{
    MP4_BoxDumpStructure_Internal( s, p_box, 0 );
}

/*****************************************************************************
 *****************************************************************************
 **
 **  High level methods to acces an MP4 file
 **
 *****************************************************************************
 *****************************************************************************/
/* Split the next component off a box path ("fourcc", ".", "..", "/",
 * optionally followed by "[index]").  Advances *ppsz_path past the token
 * and any trailing '/'; the token is strndup'ed into *ppsz_token (caller
 * frees); aborts on OOM. */
static void get_token( char **ppsz_path, char **ppsz_token, int *pi_number )
{
    size_t i_len ;
    if( !*ppsz_path[0] )
    {
        *ppsz_token = NULL;
        *pi_number = 0;
        return;
    }
    i_len = strcspn( *ppsz_path, "/[" );
    if( !i_len && **ppsz_path == '/' )
    {
        i_len = 1;
    }
    *ppsz_token = strndup( *ppsz_path, i_len );
    if( unlikely(!*ppsz_token) )
        abort();

    *ppsz_path += i_len;

    if( **ppsz_path == '[' )
    {
        (*ppsz_path)++;
        *pi_number = strtol( *ppsz_path, NULL, 10 );
        while( **ppsz_path && **ppsz_path != ']' )
        {
            (*ppsz_path)++;
        }
        if( **ppsz_path == ']' )
        {
            (*ppsz_path)++;
        }
    }
    else
    {
        *pi_number = 0;
    }
    while( **ppsz_path == '/' )
    {
        (*ppsz_path)++;
    }
}

/* Resolve a printf-style box path relative to p_box (syntax documented at
 * MP4_BoxGet below); writes the matching box, or NULL, into *pp_result. */
static void MP4_BoxGet_Internal( MP4_Box_t **pp_result,
                          MP4_Box_t *p_box, const char *psz_fmt, va_list args)
{
    char *psz_dup;
    char *psz_path;
    char *psz_token;

    if( !p_box )
    {
        *pp_result = NULL;
        return;
    }

    if( vasprintf( &psz_path, psz_fmt, args ) == -1 )
        psz_path = NULL;

    if( !psz_path || !psz_path[0] )
    {
        free( psz_path );
        *pp_result = NULL;
        return;
    }

//    fprintf( stderr, "path:'%s'\n", psz_path );
    psz_dup = psz_path; /* keep this pointer, as it need to be unallocated */
    for( ; ; )
    {
        int i_number;

        get_token( &psz_path, &psz_token, &i_number );
//        fprintf( stderr, "path:'%s', token:'%s' n:%d\n",
//                 psz_path,psz_token,i_number );
        if( !psz_token )
        {
            /* path fully consumed: the current box is the answer */
            free( psz_dup );
            *pp_result = p_box;
            return;
        }
        else
        if( !strcmp( psz_token, "/" ) )
        {
            /* Find root box */
            while( p_box && p_box->i_type != ATOM_root )
            {
                p_box = p_box->p_father;
            }
            if( !p_box )
            {
                goto error_box;
            }
        }
        else
        if( !strcmp( psz_token, "." ) )
        {
            /* Do nothing */
        }
        else
        if( !strcmp( psz_token, ".." ) )
        {
            p_box = p_box->p_father;
            if( !p_box )
            {
                goto error_box;
            }
        }
        else
        if( strlen( psz_token ) == 4 )
        {
            /* fourcc component: descend to the i_number-th child of type */
            uint32_t i_fourcc;
            i_fourcc = VLC_FOURCC( psz_token[0], psz_token[1],
                                   psz_token[2], psz_token[3] );
            p_box = p_box->p_first;
            for( ; ; )
            {
                if( !p_box )
                {
                    goto error_box;
                }
                if( p_box->i_type == i_fourcc )
                {
                    if( !i_number )
                    {
                        break;
                    }
                    i_number--;
                }
                p_box = p_box->p_next;
            }
        }
        else
        if( *psz_token == '\0' )
        {
            /* bare "[n]" component: take the n-th child of any type */
            p_box = p_box->p_first;
            for( ; ; )
            {
                if( !p_box )
                {
                    goto error_box;
                }
                if( !i_number )
                {
                    break;
                }
                i_number--;
                p_box = p_box->p_next;
            }
        }
        else
        {
//            fprintf( stderr, "Argg malformed token \"%s\"",psz_token );
            goto error_box;
        }

        FREENULL( psz_token );
    }

    return;

error_box:
    free( psz_token );
    free( psz_dup );
    *pp_result = NULL;
    return;
}

/*****************************************************************************
 * MP4_BoxGet: find a box given a path relative to p_box
 *****************************************************************************
 * Path Format: . .. / as usual
 *              [number] to specifie box number ex: trak[12]
 *
 * ex: /moov/trak[12]
 *     ../mdia
 *****************************************************************************/
MP4_Box_t *MP4_BoxGet( MP4_Box_t *p_box, const char *psz_fmt, ... )
{
    va_list args;
    MP4_Box_t *p_result;

    va_start( args, psz_fmt );
    MP4_BoxGet_Internal( &p_result, p_box, psz_fmt, args );
    va_end( args );

    return( p_result );
}

/*****************************************************************************
 * MP4_BoxCount: count box given a path relative to p_box
 *****************************************************************************
 * Path Format: . .. / as usual
 *              [number] to specifie box number ex: trak[12]
 *
 * ex: /moov/trak[12]
 *     ../mdia
 *****************************************************************************/
int MP4_BoxCount( MP4_Box_t *p_box, const char *psz_fmt, ... )
{
    va_list args;
    int     i_count;
    MP4_Box_t *p_result, *p_next;

    va_start( args, psz_fmt );
    MP4_BoxGet_Internal( &p_result, p_box, psz_fmt, args );
    va_end( args );

    if( !p_result )
    {
        return( 0 );
    }

    /* count the first match plus its later same-type siblings (earlier
     * siblings are not counted, since matching starts at the first hit) */
    i_count = 1;
    for( p_next = p_result->p_next; p_next != NULL; p_next = p_next->p_next)
    {
        if( p_next->i_type == p_result->i_type)
        {
            i_count++;
        }
    }
    return( i_count );
}
./CrossVul/dataset_final_sorted/CWE-191/c/bad_2395_0
crossvul-cpp_data_good_2395_0
/*****************************************************************************
 * libmp4.c : LibMP4 library for mp4 module for vlc
 *****************************************************************************
 * Copyright (C) 2001-2004, 2010 VLC authors and VideoLAN
 *
 * Author: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_stream.h>                               /* stream_Peek*/

#ifdef HAVE_ZLIB_H
#   include <zlib.h>                                  /* for compressed moov */
#endif

#include "libmp4.h"
#include "languages.h"
#include <math.h>

/* Some assumptions:
 * The input method HAS to be seekable */

/* Convert a 16.16 fixed-point value (as stored in mvhd/tkhd matrices and
 * rates) to a double by dividing by 2^16. */
static double conv_fx( int32_t fx ) {
    double fp = fx;
    fp /= 65536.;
    return fp;
}

/* some functions for mp4 encoding of variables */
#ifdef MP4_VERBOSE
/* Format an MP4 timestamp into "Nd-HHh:MMm:SSs" for debug logging only.
 * psz must point to a buffer large enough for the formatted string (callers
 * in this file pass 128-byte buffers); output is produced with sprintf.
 * If b_relative is false, i_date is an absolute MP4 date (epoch 1904-01-01)
 * and is shifted by the 1904..1970-style constant below before splitting. */
static void MP4_ConvertDate2Str( char *psz, uint64_t i_date, bool b_relative )
{
    int i_day;
    int i_hour;
    int i_min;
    int i_sec;

    /* date begin at 1 jan 1904 */
    if ( !b_relative )
        i_date += ((INT64_C(1904) * 365) + 17) * 24 * 60 * 60;

    i_day = i_date / ( 60*60*24);
    /* NOTE(review): hours are reduced modulo 60, not 24 — looks like a
     * long-standing quirk; harmless for debug output but worth confirming
     * against upstream before "fixing". Also i_day is truncated to int,
     * so absurdly large dates from hostile files print wrapped values. */
    i_hour = ( i_date /( 60*60 ) ) % 60;
    i_min = ( i_date / 60 ) % 60;
    i_sec = i_date % 60;
    sprintf( psz, "%dd-%2.2dh:%2.2dm:%2.2ds", i_day, i_hour, i_min, i_sec );
}
#endif

/*****************************************************************************
 * Some prototypes.
 *****************************************************************************/
static MP4_Box_t *MP4_ReadBox( stream_t *p_stream, MP4_Box_t *p_father );

/*****************************************************************************
 * MP4_ReadBoxCommon : Load only common parameters for all boxes
 *****************************************************************************
 * p_box need to be an already allocated MP4_Box_t, and all data
 *  will only be peek not read
 *
 * RETURN : 0 if it fail, 1 otherwise
 *****************************************************************************/
int MP4_ReadBoxCommon( stream_t *p_stream, MP4_Box_t *p_box )
{
    int i_read;
    const uint8_t *p_peek;

    /* Peek (do not consume) up to 32 bytes: enough for the 8-byte compact
     * header, the optional 8-byte largesize, and a 16-byte uuid. At least
     * the 8-byte compact header must be available. */
    if( ( ( i_read = stream_Peek( p_stream, &p_peek, 32 ) ) < 8 ) )
    {
        return 0;
    }
    p_box->i_pos = stream_Tell( p_stream );

    /* Reset payload and tree links; the caller chains the box afterwards. */
    p_box->data.p_payload = NULL;
    p_box->p_father = NULL;
    p_box->p_first = NULL;
    p_box->p_last = NULL;
    p_box->p_next = NULL;

    MP4_GET4BYTES( p_box->i_shortsize );
    MP4_GETFOURCC( p_box->i_type );

    /* Now special case */

    if( p_box->i_shortsize == 1 )
    {
        /* get the true size on 64 bits */
        MP4_GET8BYTES( p_box->i_size );
    }
    else
    {
        p_box->i_size = p_box->i_shortsize;
        /* XXX size of 0 means that the box extends to end of file */
    }

    if( p_box->i_type == ATOM_uuid )
    {
        /* get extented type on 16 bytes */
        /* NOTE(review): i_read is not checked to be >= 16 here before
         * GetUUID() consumes 16 bytes of p_peek, and i_read may go negative
         * after the subtraction — presumably the MP4_GET* macros and the
         * 32-byte peek above keep this safe, but confirm against the macro
         * definitions in libmp4.h (CWE-191-style underflow territory). */
        GetUUID( &p_box->i_uuid, p_peek );
        p_peek += 16; i_read -= 16;
    }
    else
    {
        /* Synthesize a uuid from the regular fourcc so comparisons are
         * uniform for both box flavours. */
        CreateUUID( &p_box->i_uuid, p_box->i_type );
    }
#ifdef MP4_ULTRA_VERBOSE
    if( p_box->i_size )
    {
        if MP4_BOX_TYPE_ASCII()
            msg_Dbg( p_stream, "found Box: %4.4s size %"PRId64" %"PRId64,
                     (char*)&p_box->i_type, p_box->i_size, p_box->i_pos );
        else
            msg_Dbg( p_stream, "found Box: c%3.3s size %"PRId64,
                     (char*)&p_box->i_type+1, p_box->i_size );
    }
#endif

    return 1;
}

/*****************************************************************************
 * MP4_NextBox : Go to the next box
 *****************************************************************************
 * if p_box == NULL, go to the next box in which we are( at the begining ).
*****************************************************************************/ static int MP4_NextBox( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_Box_t box; if( !p_box ) { if ( !MP4_ReadBoxCommon( p_stream, &box ) ) return 0; p_box = &box; } if( !p_box->i_size ) { return 2; /* Box with infinite size */ } if( p_box->p_father ) { /* if father's size == 0, it means unknown or infinite size, * and we skip the followong check */ if( p_box->p_father->i_size > 0 ) { const off_t i_box_end = p_box->i_size + p_box->i_pos; const off_t i_father_end = p_box->p_father->i_size + p_box->p_father->i_pos; /* check if it's within p-father */ if( i_box_end >= i_father_end ) { if( i_box_end > i_father_end ) msg_Dbg( p_stream, "out of bound child" ); return 0; /* out of bound */ } } } if( stream_Seek( p_stream, p_box->i_size + p_box->i_pos ) ) { return 0; } return 1; } /***************************************************************************** * For all known box a loader is given, * XXX: all common struct have to be already read by MP4_ReadBoxCommon * after called one of theses functions, file position is unknown * you need to call MP4_GotoBox to go where you want *****************************************************************************/ static int MP4_ReadBoxContainerChildrenIndexed( stream_t *p_stream, MP4_Box_t *p_container, uint32_t i_last_child, bool b_indexed ) { MP4_Box_t *p_box; /* Size of root container is set to 0 when unknown, for exemple * with a DASH stream. 
In that case, we skip the following check */ if( p_container->i_size && ( stream_Tell( p_stream ) + ((b_indexed)?16:8) > (off_t)(p_container->i_pos + p_container->i_size) ) ) { /* there is no box to load */ return 0; } do { uint32_t i_index = 0; if ( b_indexed ) { uint8_t read[8]; if ( stream_Read( p_stream, read, 8 ) < 8 ) return 0; i_index = GetDWBE(&read[4]); } if( ( p_box = MP4_ReadBox( p_stream, p_container ) ) == NULL ) continue; p_box->i_index = i_index; /* chain this box with the father and the other at same level */ if( !p_container->p_first ) p_container->p_first = p_box; else p_container->p_last->p_next = p_box; p_container->p_last = p_box; if( p_box->i_type == i_last_child ) { MP4_NextBox( p_stream, p_box ); break; } } while( MP4_NextBox( p_stream, p_box ) == 1 ); return 1; } int MP4_ReadBoxContainerChildren( stream_t *p_stream, MP4_Box_t *p_container, uint32_t i_last_child ) { return MP4_ReadBoxContainerChildrenIndexed( p_stream, p_container, i_last_child, false ); } static int MP4_ReadBoxContainerRaw( stream_t *p_stream, MP4_Box_t *p_container ) { return MP4_ReadBoxContainerChildren( p_stream, p_container, 0 ); } static int MP4_ReadBoxContainer( stream_t *p_stream, MP4_Box_t *p_container ) { if( p_container->i_size && ( p_container->i_size <= (size_t)mp4_box_headersize(p_container ) + 8 ) ) { /* container is empty, 8 stand for the first header in this box */ return 1; } /* enter box */ stream_Seek( p_stream, p_container->i_pos + mp4_box_headersize( p_container ) ); return MP4_ReadBoxContainerRaw( p_stream, p_container ); } static void MP4_FreeBox_Common( MP4_Box_t *p_box ) { /* Up to now do nothing */ (void)p_box; } static int MP4_ReadBoxSkip( stream_t *p_stream, MP4_Box_t *p_box ) { /* XXX sometime moov is hiden in a free box */ if( p_box->p_father && p_box->p_father->i_type == ATOM_root && p_box->i_type == ATOM_free ) { const uint8_t *p_peek; int i_read; vlc_fourcc_t i_fcc; i_read = stream_Peek( p_stream, &p_peek, 44 ); p_peek += mp4_box_headersize( 
p_box ) + 4; i_read -= mp4_box_headersize( p_box ) + 4; if( i_read >= 8 ) { i_fcc = VLC_FOURCC( p_peek[0], p_peek[1], p_peek[2], p_peek[3] ); if( i_fcc == ATOM_cmov || i_fcc == ATOM_mvhd ) { msg_Warn( p_stream, "detected moov hidden in a free box ..." ); p_box->i_type = ATOM_foov; return MP4_ReadBoxContainer( p_stream, p_box ); } } } /* Nothing to do */ #ifdef MP4_ULTRA_VERBOSE if MP4_BOX_TYPE_ASCII() msg_Dbg( p_stream, "skip box: \"%4.4s\"", (char*)&p_box->i_type ); else msg_Dbg( p_stream, "skip box: \"c%3.3s\"", (char*)&p_box->i_type+1 ); #endif return 1; } static int MP4_ReadBox_ilst( stream_t *p_stream, MP4_Box_t *p_box ) { if( p_box->i_size < 8 || stream_Read( p_stream, NULL, 8 ) < 8 ) return 0; /* Find our handler */ if ( !p_box->i_handler && p_box->p_father ) { const MP4_Box_t *p_sibling = p_box->p_father->p_first; while( p_sibling ) { if ( p_sibling->i_type == ATOM_hdlr && p_sibling->data.p_hdlr ) { p_box->i_handler = p_sibling->data.p_hdlr->i_handler_type; break; } p_sibling = p_sibling->p_next; } } switch( p_box->i_handler ) { case 0: msg_Warn( p_stream, "no handler for ilst atom" ); return 0; case HANDLER_mdta: return MP4_ReadBoxContainerChildrenIndexed( p_stream, p_box, 0, true ); case HANDLER_mdir: return MP4_ReadBoxContainerChildren( p_stream, p_box, 0 ); default: msg_Warn( p_stream, "Unknown ilst handler type '%4.4s'", (char*)&p_box->i_handler ); return 0; } } static int MP4_ReadBox_ftyp( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ftyp_t ); MP4_GETFOURCC( p_box->data.p_ftyp->i_major_brand ); MP4_GET4BYTES( p_box->data.p_ftyp->i_minor_version ); if( ( p_box->data.p_ftyp->i_compatible_brands_count = i_read / 4 ) ) { uint32_t *tab = p_box->data.p_ftyp->i_compatible_brands = calloc( p_box->data.p_ftyp->i_compatible_brands_count, sizeof(uint32_t)); if( unlikely( tab == NULL ) ) MP4_READBOX_EXIT( 0 ); for( unsigned i = 0; i < p_box->data.p_ftyp->i_compatible_brands_count; i++ ) { MP4_GETFOURCC( tab[i] ); } } else { 
p_box->data.p_ftyp->i_compatible_brands = NULL; } MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_ftyp( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_ftyp->i_compatible_brands ); } static int MP4_ReadBox_mvhd( stream_t *p_stream, MP4_Box_t *p_box ) { #ifdef MP4_VERBOSE char s_creation_time[128]; char s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_mvhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mvhd ); if( p_box->data.p_mvhd->i_version ) { MP4_GET8BYTES( p_box->data.p_mvhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_mvhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_timescale ); MP4_GET8BYTES( p_box->data.p_mvhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_mvhd->i_creation_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mvhd->i_timescale ); MP4_GET4BYTES( p_box->data.p_mvhd->i_duration ); } MP4_GET4BYTES( p_box->data.p_mvhd->i_rate ); MP4_GET2BYTES( p_box->data.p_mvhd->i_volume ); MP4_GET2BYTES( p_box->data.p_mvhd->i_reserved1 ); for( unsigned i = 0; i < 2; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_reserved2[i] ); } for( unsigned i = 0; i < 9; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_matrix[i] ); } for( unsigned i = 0; i < 6; i++ ) { MP4_GET4BYTES( p_box->data.p_mvhd->i_predefined[i] ); } MP4_GET4BYTES( p_box->data.p_mvhd->i_next_track_id ); #ifdef MP4_VERBOSE MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mvhd->i_creation_time, false ); MP4_ConvertDate2Str( s_modification_time, p_box->data.p_mvhd->i_modification_time, false ); if( p_box->data.p_mvhd->i_rate ) { MP4_ConvertDate2Str( s_duration, p_box->data.p_mvhd->i_duration / p_box->data.p_mvhd->i_rate, true ); } else { s_duration[0] = 0; } msg_Dbg( p_stream, "read box: \"mvhd\" creation %s modification %s time scale %d duration %s rate %f volume %f next track id %d", s_creation_time, s_modification_time, (uint32_t)p_box->data.p_mvhd->i_timescale, s_duration, (float)p_box->data.p_mvhd->i_rate 
/ (1<<16 ), (float)p_box->data.p_mvhd->i_volume / 256 , (uint32_t)p_box->data.p_mvhd->i_next_track_id ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mfhd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_mfhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mvhd ); MP4_GET4BYTES( p_box->data.p_mfhd->i_sequence_number ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"mfhd\" sequence number %d", p_box->data.p_mfhd->i_sequence_number ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tfxd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfxd_t ); MP4_Box_data_tfxd_t *p_tfxd_data = p_box->data.p_tfxd; MP4_GETVERSIONFLAGS( p_tfxd_data ); if( p_tfxd_data->i_version == 0 ) { MP4_GET4BYTES( p_tfxd_data->i_fragment_abs_time ); MP4_GET4BYTES( p_tfxd_data->i_fragment_duration ); } else { MP4_GET8BYTES( p_tfxd_data->i_fragment_abs_time ); MP4_GET8BYTES( p_tfxd_data->i_fragment_duration ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"tfxd\" version %d, flags 0x%x, "\ "fragment duration %"PRIu64", fragment abs time %"PRIu64, p_tfxd_data->i_version, p_tfxd_data->i_flags, p_tfxd_data->i_fragment_duration, p_tfxd_data->i_fragment_abs_time ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tfrf( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfxd_t ); MP4_Box_data_tfrf_t *p_tfrf_data = p_box->data.p_tfrf; MP4_GETVERSIONFLAGS( p_tfrf_data ); MP4_GET1BYTE( p_tfrf_data->i_fragment_count ); p_tfrf_data->p_tfrf_data_fields = calloc( p_tfrf_data->i_fragment_count, sizeof( TfrfBoxDataFields_t ) ); if( !p_tfrf_data->p_tfrf_data_fields ) MP4_READBOX_EXIT( 0 ); for( uint8_t i = 0; i < p_tfrf_data->i_fragment_count; i++ ) { TfrfBoxDataFields_t *TfrfBoxDataField = &p_tfrf_data->p_tfrf_data_fields[i]; if( p_tfrf_data->i_version == 0 ) { MP4_GET4BYTES( TfrfBoxDataField->i_fragment_abs_time ); MP4_GET4BYTES( TfrfBoxDataField->i_fragment_duration ); } else { MP4_GET8BYTES( 
TfrfBoxDataField->i_fragment_abs_time ); MP4_GET8BYTES( TfrfBoxDataField->i_fragment_duration ); } } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"tfrf\" version %d, flags 0x%x, "\ "fragment count %"PRIu8, p_tfrf_data->i_version, p_tfrf_data->i_flags, p_tfrf_data->i_fragment_count ); for( uint8_t i = 0; i < p_tfrf_data->i_fragment_count; i++ ) { TfrfBoxDataFields_t *TfrfBoxDataField = &p_tfrf_data->p_tfrf_data_fields[i]; msg_Dbg( p_stream, "\"tfrf\" fragment duration %"PRIu64", "\ "fragment abs time %"PRIu64, TfrfBoxDataField->i_fragment_duration, TfrfBoxDataField->i_fragment_abs_time ); } #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_tfrf( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_tfrf->p_tfrf_data_fields ); } static int MP4_ReadBox_stra( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stra_t ); MP4_Box_data_stra_t *p_stra = p_box->data.p_stra; uint8_t i_reserved; VLC_UNUSED(i_reserved); MP4_GET1BYTE( p_stra->i_es_cat ); MP4_GET1BYTE( i_reserved ); MP4_GET2BYTES( p_stra->i_track_ID ); MP4_GET4BYTES( p_stra->i_timescale ); MP4_GET8BYTES( p_stra->i_duration ); MP4_GET4BYTES( p_stra->FourCC ); MP4_GET4BYTES( p_stra->Bitrate ); MP4_GET4BYTES( p_stra->MaxWidth ); MP4_GET4BYTES( p_stra->MaxHeight ); MP4_GET4BYTES( p_stra->SamplingRate ); MP4_GET4BYTES( p_stra->Channels ); MP4_GET4BYTES( p_stra->BitsPerSample ); MP4_GET4BYTES( p_stra->AudioTag ); MP4_GET2BYTES( p_stra->nBlockAlign ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( i_reserved ); MP4_GET1BYTE( p_stra->cpd_len ); if( p_stra->cpd_len > i_read ) goto error; p_stra->CodecPrivateData = malloc( p_stra->cpd_len ); if( unlikely( p_stra->CodecPrivateData == NULL ) ) goto error; memcpy( p_stra->CodecPrivateData, p_peek, p_stra->cpd_len ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "es_cat is %"PRIu8", birate is %"PRIu32, p_stra->i_es_cat, p_stra->Bitrate ); #endif MP4_READBOX_EXIT( 1 ); error: MP4_READBOX_EXIT( 0 ); } static void MP4_FreeBox_stra( MP4_Box_t 
*p_box ) { FREENULL( p_box->data.p_stra->CodecPrivateData ); } static int MP4_ReadBox_uuid( stream_t *p_stream, MP4_Box_t *p_box ) { if( !CmpUUID( &p_box->i_uuid, &TfrfBoxUUID ) ) return MP4_ReadBox_tfrf( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &TfxdBoxUUID ) ) return MP4_ReadBox_tfxd( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &SmooBoxUUID ) ) return MP4_ReadBoxContainer( p_stream, p_box ); if( !CmpUUID( &p_box->i_uuid, &StraBoxUUID ) ) return MP4_ReadBox_stra( p_stream, p_box ); msg_Warn( p_stream, "Unknown uuid type box" ); return 1; } static void MP4_FreeBox_uuid( MP4_Box_t *p_box ) { if( !CmpUUID( &p_box->i_uuid, &TfrfBoxUUID ) ) return MP4_FreeBox_tfrf( p_box ); if( !CmpUUID( &p_box->i_uuid, &TfxdBoxUUID ) ) return MP4_FreeBox_Common( p_box ); if( !CmpUUID( &p_box->i_uuid, &SmooBoxUUID ) ) return MP4_FreeBox_Common( p_box ); if( !CmpUUID( &p_box->i_uuid, &StraBoxUUID ) ) return MP4_FreeBox_stra( p_box ); } static int MP4_ReadBox_sidx( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_sidx_t ); MP4_Box_data_sidx_t *p_sidx_data = p_box->data.p_sidx; MP4_GETVERSIONFLAGS( p_sidx_data ); MP4_GET4BYTES( p_sidx_data->i_reference_ID ); MP4_GET4BYTES( p_sidx_data->i_timescale ); if( p_sidx_data->i_version == 0 ) { MP4_GET4BYTES( p_sidx_data->i_earliest_presentation_time ); MP4_GET4BYTES( p_sidx_data->i_first_offset ); } else { MP4_GET8BYTES( p_sidx_data->i_earliest_presentation_time ); MP4_GET8BYTES( p_sidx_data->i_first_offset ); } uint16_t i_reserved; VLC_UNUSED(i_reserved); MP4_GET2BYTES( i_reserved ); MP4_GET2BYTES( p_sidx_data->i_reference_count ); uint16_t i_count = p_sidx_data->i_reference_count; p_sidx_data->p_items = calloc( i_count, sizeof( MP4_Box_sidx_item_t ) ); uint32_t tmp; for( unsigned i = 0; i < i_count; i++ ) { MP4_GET4BYTES( tmp ); p_sidx_data->p_items[i].b_reference_type = (bool)((tmp & 0x80000000)>>24); p_sidx_data->p_items[i].i_referenced_size = tmp & 0x7fffffff; MP4_GET4BYTES( 
p_sidx_data->p_items[i].i_subsegment_duration ); MP4_GET4BYTES( tmp ); p_sidx_data->p_items[i].b_starts_with_SAP = (bool)((tmp & 0x80000000)>>24); p_sidx_data->p_items[i].i_SAP_type = (tmp & 0x70000000)>>24; p_sidx_data->p_items[i].i_SAP_delta_time = tmp & 0xfffffff; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"sidx\" version %d, flags 0x%x, "\ "ref_ID %"PRIu32", timescale %"PRIu32", ref_count %"PRIu16", "\ "first subsegmt duration %"PRIu32, p_sidx_data->i_version, p_sidx_data->i_flags, p_sidx_data->i_reference_ID, p_sidx_data->i_timescale, p_sidx_data->i_reference_count, p_sidx_data->p_items[0].i_subsegment_duration ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_sidx( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_sidx->p_items ); } static int MP4_ReadBox_tfhd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tfhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_tfhd ); if( p_box->data.p_tfhd->i_version != 0 ) { msg_Warn( p_stream, "'tfhd' box with version != 0. "\ " Don't know what to do with that, please patch" ); MP4_READBOX_EXIT( 0 ); } MP4_GET4BYTES( p_box->data.p_tfhd->i_track_ID ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DURATION_IS_EMPTY ) { msg_Dbg( p_stream, "'duration-is-empty' flag is present "\ "=> no samples for this time interval." 
); p_box->data.p_tfhd->b_empty = true; } else p_box->data.p_tfhd->b_empty = false; if( p_box->data.p_tfhd->i_flags & MP4_TFHD_BASE_DATA_OFFSET ) MP4_GET8BYTES( p_box->data.p_tfhd->i_base_data_offset ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_SAMPLE_DESC_INDEX ) MP4_GET4BYTES( p_box->data.p_tfhd->i_sample_description_index ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_DURATION ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_duration ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_SIZE ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_size ); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_FLAGS ) MP4_GET4BYTES( p_box->data.p_tfhd->i_default_sample_flags ); #ifdef MP4_VERBOSE char psz_base[128] = "\0"; char psz_desc[128] = "\0"; char psz_dura[128] = "\0"; char psz_size[128] = "\0"; char psz_flag[128] = "\0"; if( p_box->data.p_tfhd->i_flags & MP4_TFHD_BASE_DATA_OFFSET ) snprintf(psz_base, sizeof(psz_base), "base offset %"PRId64, p_box->data.p_tfhd->i_base_data_offset); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_SAMPLE_DESC_INDEX ) snprintf(psz_desc, sizeof(psz_desc), "sample description index %d", p_box->data.p_tfhd->i_sample_description_index); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_DURATION ) snprintf(psz_dura, sizeof(psz_dura), "sample duration %d", p_box->data.p_tfhd->i_default_sample_duration); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_SIZE ) snprintf(psz_size, sizeof(psz_size), "sample size %d", p_box->data.p_tfhd->i_default_sample_size); if( p_box->data.p_tfhd->i_flags & MP4_TFHD_DFLT_SAMPLE_FLAGS ) snprintf(psz_flag, sizeof(psz_flag), "sample flags 0x%x", p_box->data.p_tfhd->i_default_sample_flags); msg_Dbg( p_stream, "read box: \"tfhd\" version %d flags 0x%x track ID %d %s %s %s %s %s", p_box->data.p_tfhd->i_version, p_box->data.p_tfhd->i_flags, p_box->data.p_tfhd->i_track_ID, psz_base, psz_desc, psz_dura, psz_size, psz_flag ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_trun( 
stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_trun_t ); MP4_GETVERSIONFLAGS( p_box->data.p_trun ); MP4_GET4BYTES( p_box->data.p_trun->i_sample_count ); if( p_box->data.p_trun->i_flags & MP4_TRUN_DATA_OFFSET ) MP4_GET4BYTES( p_box->data.p_trun->i_data_offset ); if( p_box->data.p_trun->i_flags & MP4_TRUN_FIRST_FLAGS ) MP4_GET4BYTES( p_box->data.p_trun->i_first_sample_flags ); p_box->data.p_trun->p_samples = calloc( p_box->data.p_trun->i_sample_count, sizeof(MP4_descriptor_trun_sample_t) ); if ( p_box->data.p_trun->p_samples == NULL ) MP4_READBOX_EXIT( 0 ); for( unsigned int i = 0; i<p_box->data.p_trun->i_sample_count; i++ ) { MP4_descriptor_trun_sample_t *p_sample = &p_box->data.p_trun->p_samples[i]; if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_DURATION ) MP4_GET4BYTES( p_sample->i_duration ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_SIZE ) MP4_GET4BYTES( p_sample->i_size ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_FLAGS ) MP4_GET4BYTES( p_sample->i_flags ); if( p_box->data.p_trun->i_flags & MP4_TRUN_SAMPLE_TIME_OFFSET ) MP4_GET4BYTES( p_sample->i_composition_time_offset ); } #ifdef MP4_ULTRA_VERBOSE msg_Dbg( p_stream, "read box: \"trun\" version %u flags 0x%x sample count %u", p_box->data.p_trun->i_version, p_box->data.p_trun->i_flags, p_box->data.p_trun->i_sample_count ); for( unsigned int i = 0; i<p_box->data.p_trun->i_sample_count; i++ ) { MP4_descriptor_trun_sample_t *p_sample = &p_box->data.p_trun->p_samples[i]; msg_Dbg( p_stream, "read box: \"trun\" sample %4.4u flags 0x%x "\ "duration %"PRIu32" size %"PRIu32" composition time offset %"PRIu32, i, p_sample->i_flags, p_sample->i_duration, p_sample->i_size, p_sample->i_composition_time_offset ); } #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_trun( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_trun->p_samples ); } static int MP4_ReadBox_tkhd( stream_t *p_stream, MP4_Box_t *p_box ) { #ifdef MP4_VERBOSE char s_creation_time[128]; char 
s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_tkhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_tkhd ); if( p_box->data.p_tkhd->i_version ) { MP4_GET8BYTES( p_box->data.p_tkhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_tkhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_track_ID ); MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved ); MP4_GET8BYTES( p_box->data.p_tkhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_tkhd->i_creation_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_tkhd->i_track_ID ); MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved ); MP4_GET4BYTES( p_box->data.p_tkhd->i_duration ); } for( unsigned i = 0; i < 2; i++ ) { MP4_GET4BYTES( p_box->data.p_tkhd->i_reserved2[i] ); } MP4_GET2BYTES( p_box->data.p_tkhd->i_layer ); MP4_GET2BYTES( p_box->data.p_tkhd->i_predefined ); MP4_GET2BYTES( p_box->data.p_tkhd->i_volume ); MP4_GET2BYTES( p_box->data.p_tkhd->i_reserved3 ); for( unsigned i = 0; i < 9; i++ ) { MP4_GET4BYTES( p_box->data.p_tkhd->i_matrix[i] ); } MP4_GET4BYTES( p_box->data.p_tkhd->i_width ); MP4_GET4BYTES( p_box->data.p_tkhd->i_height ); double rotation; //angle in degrees to be rotated clockwise double scale[2]; // scale factor; sx = scale[0] , sy = scale[1] double translate[2];// amount to translate; tx = translate[0] , ty = translate[1] int32_t *matrix = p_box->data.p_tkhd->i_matrix; translate[0] = conv_fx(matrix[6]); translate[1] = conv_fx(matrix[7]); scale[0] = sqrt(conv_fx(matrix[0]) * conv_fx(matrix[0]) + conv_fx(matrix[3]) * conv_fx(matrix[3])); scale[1] = sqrt(conv_fx(matrix[1]) * conv_fx(matrix[1]) + conv_fx(matrix[4]) * conv_fx(matrix[4])); rotation = atan2(conv_fx(matrix[1]) / scale[1], conv_fx(matrix[0]) / scale[0]) * 180 / M_PI; if (rotation < 0) rotation += 360.; p_box->data.p_tkhd->f_rotation = rotation; #ifdef MP4_VERBOSE MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mvhd->i_creation_time, false ); MP4_ConvertDate2Str( 
s_modification_time, p_box->data.p_mvhd->i_modification_time, false ); MP4_ConvertDate2Str( s_duration, p_box->data.p_mvhd->i_duration, true ); msg_Dbg( p_stream, "read box: \"tkhd\" creation %s modification %s duration %s track ID %d layer %d volume %f rotation %f scaleX %f scaleY %f translateX %f translateY %f width %f height %f. " "Matrix: %i %i %i %i %i %i %i %i %i", s_creation_time, s_modification_time, s_duration, p_box->data.p_tkhd->i_track_ID, p_box->data.p_tkhd->i_layer, (float)p_box->data.p_tkhd->i_volume / 256 , rotation, scale[0], scale[1], translate[0], translate[1], (float)p_box->data.p_tkhd->i_width / BLOCK16x16, (float)p_box->data.p_tkhd->i_height / BLOCK16x16, p_box->data.p_tkhd->i_matrix[0], p_box->data.p_tkhd->i_matrix[1], p_box->data.p_tkhd->i_matrix[2], p_box->data.p_tkhd->i_matrix[3], p_box->data.p_tkhd->i_matrix[4], p_box->data.p_tkhd->i_matrix[5], p_box->data.p_tkhd->i_matrix[6], p_box->data.p_tkhd->i_matrix[7], p_box->data.p_tkhd->i_matrix[8] ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_load( stream_t *p_stream, MP4_Box_t *p_box ) { if ( p_box->i_size != 24 ) return 0; MP4_READBOX_ENTER( MP4_Box_data_load_t ); MP4_GET4BYTES( p_box->data.p_load->i_start_time ); MP4_GET4BYTES( p_box->data.p_load->i_duration ); MP4_GET4BYTES( p_box->data.p_load->i_flags ); MP4_GET4BYTES( p_box->data.p_load->i_hints ); MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mdhd( stream_t *p_stream, MP4_Box_t *p_box ) { uint16_t i_language; #ifdef MP4_VERBOSE char s_creation_time[128]; char s_modification_time[128]; char s_duration[128]; #endif MP4_READBOX_ENTER( MP4_Box_data_mdhd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mdhd ); if( p_box->data.p_mdhd->i_version ) { MP4_GET8BYTES( p_box->data.p_mdhd->i_creation_time ); MP4_GET8BYTES( p_box->data.p_mdhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mdhd->i_timescale ); MP4_GET8BYTES( p_box->data.p_mdhd->i_duration ); } else { MP4_GET4BYTES( p_box->data.p_mdhd->i_creation_time ); MP4_GET4BYTES( 
p_box->data.p_mdhd->i_modification_time ); MP4_GET4BYTES( p_box->data.p_mdhd->i_timescale ); MP4_GET4BYTES( p_box->data.p_mdhd->i_duration ); } MP4_GET2BYTES( i_language ); decodeQtLanguageCode( i_language, p_box->data.p_mdhd->rgs_language, &p_box->data.p_mdhd->b_mac_encoding ); MP4_GET2BYTES( p_box->data.p_mdhd->i_quality ); #ifdef MP4_VERBOSE MP4_ConvertDate2Str( s_creation_time, p_box->data.p_mdhd->i_creation_time, false ); MP4_ConvertDate2Str( s_modification_time, p_box->data.p_mdhd->i_modification_time, false ); MP4_ConvertDate2Str( s_duration, p_box->data.p_mdhd->i_duration, true ); msg_Dbg( p_stream, "read box: \"mdhd\" creation %s modification %s time scale %d duration %s language %3.3s", s_creation_time, s_modification_time, (uint32_t)p_box->data.p_mdhd->i_timescale, s_duration, (char*) &p_box->data.p_mdhd->rgs_language ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_hdlr( stream_t *p_stream, MP4_Box_t *p_box ) { int32_t i_reserved; VLC_UNUSED(i_reserved); MP4_READBOX_ENTER( MP4_Box_data_hdlr_t ); MP4_GETVERSIONFLAGS( p_box->data.p_hdlr ); MP4_GETFOURCC( p_box->data.p_hdlr->i_predefined ); MP4_GETFOURCC( p_box->data.p_hdlr->i_handler_type ); MP4_GET4BYTES( i_reserved ); MP4_GET4BYTES( i_reserved ); MP4_GET4BYTES( i_reserved ); p_box->data.p_hdlr->psz_name = NULL; if( i_read > 0 ) { uint8_t *psz = p_box->data.p_hdlr->psz_name = malloc( i_read + 1 ); if( unlikely( psz == NULL ) ) MP4_READBOX_EXIT( 0 ); /* Yes, I love .mp4 :( */ if( p_box->data.p_hdlr->i_predefined == VLC_FOURCC( 'm', 'h', 'l', 'r' ) ) { uint8_t i_len; int i_copy; MP4_GET1BYTE( i_len ); i_copy = __MIN( i_read, i_len ); memcpy( psz, p_peek, i_copy ); p_box->data.p_hdlr->psz_name[i_copy] = '\0'; } else { memcpy( psz, p_peek, i_read ); p_box->data.p_hdlr->psz_name[i_read] = '\0'; } } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"hdlr\" handler type: \"%4.4s\" name: \"%s\"", (char*)&p_box->data.p_hdlr->i_handler_type, p_box->data.p_hdlr->psz_name ); #endif MP4_READBOX_EXIT( 1 ); } 
static void MP4_FreeBox_hdlr( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_hdlr->psz_name );
}

/* Read a "vmhd" (video media header) box: graphics transfer mode and
 * the RGB opcolor used by some QuickTime compositing modes. */
static int MP4_ReadBox_vmhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_vmhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_vmhd );

    MP4_GET2BYTES( p_box->data.p_vmhd->i_graphics_mode );
    for( unsigned i = 0; i < 3; i++ )
    {
        MP4_GET2BYTES( p_box->data.p_vmhd->i_opcolor[i] );
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"vmhd\" graphics-mode %d opcolor (%d, %d, %d)",
             p_box->data.p_vmhd->i_graphics_mode,
             p_box->data.p_vmhd->i_opcolor[0],
             p_box->data.p_vmhd->i_opcolor[1],
             p_box->data.p_vmhd->i_opcolor[2] );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "smhd" (sound media header) box: stereo balance. */
static int MP4_ReadBox_smhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_smhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_smhd );

    MP4_GET2BYTES( p_box->data.p_smhd->i_balance );
    MP4_GET2BYTES( p_box->data.p_smhd->i_reserved );

#ifdef MP4_VERBOSE
    /* i_balance is an 8.8 fixed-point value */
    msg_Dbg( p_stream, "read box: \"smhd\" balance %f",
             (float)p_box->data.p_smhd->i_balance / 256 );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "hmhd" (hint media header) box: PDU sizes and bitrates. */
static int MP4_ReadBox_hmhd( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_hmhd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_hmhd );

    MP4_GET2BYTES( p_box->data.p_hmhd->i_max_PDU_size );
    MP4_GET2BYTES( p_box->data.p_hmhd->i_avg_PDU_size );
    MP4_GET4BYTES( p_box->data.p_hmhd->i_max_bitrate );
    MP4_GET4BYTES( p_box->data.p_hmhd->i_avg_bitrate );
    MP4_GET4BYTES( p_box->data.p_hmhd->i_reserved );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"hmhd\" maxPDU-size %d avgPDU-size %d max-bitrate %d avg-bitrate %d",
             p_box->data.p_hmhd->i_max_PDU_size,
             p_box->data.p_hmhd->i_avg_PDU_size,
             p_box->data.p_hmhd->i_max_bitrate,
             p_box->data.p_hmhd->i_avg_bitrate );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "url " data reference box: location of the media data. */
static int MP4_ReadBox_url( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_url_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_url );
    MP4_GETSTRINGZ( p_box->data.p_url->psz_location );

#ifdef MP4_VERBOSE
msg_Dbg( p_stream, "read box: \"url\" url: %s", p_box->data.p_url->psz_location ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_url( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_url->psz_location ); } static int MP4_ReadBox_urn( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_urn_t ); MP4_GETVERSIONFLAGS( p_box->data.p_urn ); MP4_GETSTRINGZ( p_box->data.p_urn->psz_name ); MP4_GETSTRINGZ( p_box->data.p_urn->psz_location ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"urn\" name %s location %s", p_box->data.p_urn->psz_name, p_box->data.p_urn->psz_location ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_urn( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_urn->psz_name ); FREENULL( p_box->data.p_urn->psz_location ); } static int MP4_ReadBox_dref( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_dref_t ); MP4_GETVERSIONFLAGS( p_box->data.p_dref ); MP4_GET4BYTES( p_box->data.p_dref->i_entry_count ); stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 ); MP4_ReadBoxContainerRaw( p_stream, p_box ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"dref\" entry-count %d", p_box->data.p_dref->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stts( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stts->pi_sample_count ); FREENULL( p_box->data.p_stts->pi_sample_delta ); } static int MP4_ReadBox_stts( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stts_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stts ); MP4_GET4BYTES( p_box->data.p_stts->i_entry_count ); p_box->data.p_stts->pi_sample_count = calloc( p_box->data.p_stts->i_entry_count, sizeof(uint32_t) ); p_box->data.p_stts->pi_sample_delta = calloc( p_box->data.p_stts->i_entry_count, sizeof(int32_t) ); if( p_box->data.p_stts->pi_sample_count == NULL || p_box->data.p_stts->pi_sample_delta == NULL ) { MP4_READBOX_EXIT( 0 ); } uint32_t i = 0; for( ; (i < p_box->data.p_stts->i_entry_count )&&( i_read 
>=8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stts->pi_sample_count[i] ); MP4_GET4BYTES( p_box->data.p_stts->pi_sample_delta[i] ); } if ( i < p_box->data.p_stts->i_entry_count ) p_box->data.p_stts->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stts\" entry-count %d", p_box->data.p_stts->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_ctts( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_ctts->pi_sample_count ); FREENULL( p_box->data.p_ctts->pi_sample_offset ); } static int MP4_ReadBox_ctts( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ctts_t ); MP4_GETVERSIONFLAGS( p_box->data.p_ctts ); MP4_GET4BYTES( p_box->data.p_ctts->i_entry_count ); p_box->data.p_ctts->pi_sample_count = calloc( p_box->data.p_ctts->i_entry_count, sizeof(uint32_t) ); p_box->data.p_ctts->pi_sample_offset = calloc( p_box->data.p_ctts->i_entry_count, sizeof(int32_t) ); if( ( p_box->data.p_ctts->pi_sample_count == NULL ) || ( p_box->data.p_ctts->pi_sample_offset == NULL ) ) { MP4_READBOX_EXIT( 0 ); } uint32_t i = 0; for( ; (i < p_box->data.p_ctts->i_entry_count )&&( i_read >=8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_ctts->pi_sample_count[i] ); MP4_GET4BYTES( p_box->data.p_ctts->pi_sample_offset[i] ); } if ( i < p_box->data.p_ctts->i_entry_count ) p_box->data.p_ctts->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"ctts\" entry-count %d", p_box->data.p_ctts->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadLengthDescriptor( uint8_t **pp_peek, int64_t *i_read ) { unsigned int i_b; unsigned int i_len = 0; do { i_b = **pp_peek; (*pp_peek)++; (*i_read)--; i_len = ( i_len << 7 ) + ( i_b&0x7f ); } while( i_b&0x80 ); return( i_len ); } static void MP4_FreeBox_esds( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_esds->es_descriptor.psz_URL ); if( p_box->data.p_esds->es_descriptor.p_decConfigDescr ) { FREENULL( p_box->data.p_esds->es_descriptor.p_decConfigDescr->p_decoder_specific_info ); FREENULL( 
p_box->data.p_esds->es_descriptor.p_decConfigDescr );
    }
}

/* Read an "esds" box (MPEG-4 elementary stream descriptor,
 * ISO/IEC 14496-1): ES descriptor with optional URL, then the decoder
 * configuration descriptor whose "specific info" is the codec
 * extradata used by the decoder. */
static int MP4_ReadBox_esds( stream_t *p_stream, MP4_Box_t *p_box )
{
#define es_descriptor p_box->data.p_esds->es_descriptor
    unsigned int i_len;
    unsigned int i_flags;
    unsigned int i_type;

    MP4_READBOX_ENTER( MP4_Box_data_esds_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_esds );

    MP4_GET1BYTE( i_type );
    if( i_type == 0x03 ) /* MP4ESDescrTag ISO/IEC 14496-1 8.3.3 */
    {
        i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "found esds MPEG4ESDescr (%dBytes)", i_len );
#endif

        MP4_GET2BYTES( es_descriptor.i_ES_ID );
        MP4_GET1BYTE( i_flags );
        /* flag byte: stream dependence, URL present, OCR stream, priority */
        es_descriptor.b_stream_dependence = ( (i_flags&0x80) != 0);
        es_descriptor.b_url = ( (i_flags&0x40) != 0);
        es_descriptor.b_OCRstream = ( (i_flags&0x20) != 0);
        es_descriptor.i_stream_priority = i_flags&0x1f;

        if( es_descriptor.b_stream_dependence )
        {
            MP4_GET2BYTES( es_descriptor.i_depend_on_ES_ID );
        }

        if( es_descriptor.b_url )
        {
            /* length-prefixed URL, clamped to the bytes remaining */
            unsigned int i_len;

            MP4_GET1BYTE( i_len );
            i_len = __MIN(i_read, i_len);
            es_descriptor.psz_URL = malloc( i_len + 1 );
            if( es_descriptor.psz_URL )
            {
                memcpy( es_descriptor.psz_URL, p_peek, i_len );
                es_descriptor.psz_URL[i_len] = 0;
            }
            p_peek += i_len;
            i_read -= i_len;
        }
        else
        {
            es_descriptor.psz_URL = NULL;
        }
        if( es_descriptor.b_OCRstream )
        {
            MP4_GET2BYTES( es_descriptor.i_OCR_ES_ID );
        }
        MP4_GET1BYTE( i_type ); /* get next type */
    }

    if( i_type != 0x04)/* MP4DecConfigDescrTag ISO/IEC 14496-1 8.3.4 */
    {
        es_descriptor.p_decConfigDescr = NULL;
        MP4_READBOX_EXIT( 1 ); /* rest isn't interesting up to now */
    }

    i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "found esds MP4DecConfigDescr (%dBytes)", i_len );
#endif

    es_descriptor.p_decConfigDescr = calloc( 1, sizeof( MP4_descriptor_decoder_config_t ));
    if( unlikely( es_descriptor.p_decConfigDescr == NULL ) )
        MP4_READBOX_EXIT( 0 );

    MP4_GET1BYTE( es_descriptor.p_decConfigDescr->i_objectProfileIndication );
    MP4_GET1BYTE( i_flags );
es_descriptor.p_decConfigDescr->i_streamType = i_flags >> 2;
    es_descriptor.p_decConfigDescr->b_upStream = ( i_flags >> 1 )&0x01;
    MP4_GET3BYTES( es_descriptor.p_decConfigDescr->i_buffer_sizeDB );
    MP4_GET4BYTES( es_descriptor.p_decConfigDescr->i_max_bitrate );
    MP4_GET4BYTES( es_descriptor.p_decConfigDescr->i_avg_bitrate );

    MP4_GET1BYTE( i_type );
    if( i_type != 0x05 )/* MP4DecSpecificDescrTag ISO/IEC 14496-1 8.3.5 */
    {
        es_descriptor.p_decConfigDescr->i_decoder_specific_info_len = 0;
        es_descriptor.p_decConfigDescr->p_decoder_specific_info = NULL;
        MP4_READBOX_EXIT( 1 );
    }

    i_len = MP4_ReadLengthDescriptor( &p_peek, &i_read );
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "found esds MP4DecSpecificDescr (%dBytes)", i_len );
#endif
    /* bounds check before copying the decoder extradata */
    if( i_len > i_read )
        MP4_READBOX_EXIT( 0 );

    es_descriptor.p_decConfigDescr->i_decoder_specific_info_len = i_len;
    es_descriptor.p_decConfigDescr->p_decoder_specific_info = malloc( i_len );
    if( unlikely( es_descriptor.p_decConfigDescr->p_decoder_specific_info == NULL ) )
        MP4_READBOX_EXIT( 0 );

    memcpy( es_descriptor.p_decConfigDescr->p_decoder_specific_info,
            p_peek, i_len );

    MP4_READBOX_EXIT( 1 );
#undef es_descriptor
}

static void MP4_FreeBox_hvcC(MP4_Box_t *p_box )
{
    MP4_Box_data_hvcC_t *p_hvcC = p_box->data.p_hvcC;
    if( p_hvcC->i_hvcC > 0 ) FREENULL( p_hvcC->p_hvcC) ;
}

/* Read an "hvcC" (HEVC decoder configuration) box; the payload is kept
 * as an opaque blob for the decoder. */
static int MP4_ReadBox_hvcC( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_hvcC_t *p_hvcC;

    MP4_READBOX_ENTER( MP4_Box_data_hvcC_t );
    p_hvcC = p_box->data.p_hvcC;

    p_hvcC->i_hvcC = i_read;
    if( p_hvcC->i_hvcC > 0 )
    {
        /* NOTE(review): on malloc failure i_hvcC stays non-zero while
         * p_hvcC is NULL — consumers must check the pointer. */
        uint8_t * p = p_hvcC->p_hvcC = malloc( p_hvcC->i_hvcC );
        if( p )
            memcpy( p, p_peek, i_read );
    }
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_avcC( MP4_Box_t *p_box )
{
    MP4_Box_data_avcC_t *p_avcC = p_box->data.p_avcC;
    int i;

    if( p_avcC->i_avcC > 0 ) FREENULL( p_avcC->p_avcC );

    if( p_avcC->sps )
    {
        for( i = 0; i < p_avcC->i_sps; i++ )
            FREENULL( p_avcC->sps[i] );
    }
    if( p_avcC->pps )
    {
        for( i = 0; i < p_avcC->i_pps; i++ )
            FREENULL( p_avcC->pps[i] );
    }
    if( p_avcC->i_sps >
0 ) FREENULL( p_avcC->sps );
    if( p_avcC->i_sps > 0 ) FREENULL( p_avcC->i_sps_length );
    if( p_avcC->i_pps > 0 ) FREENULL( p_avcC->pps );
    if( p_avcC->i_pps > 0 ) FREENULL( p_avcC->i_pps_length );
}

/* Read an "avcC" (H.264 decoder configuration) box: keeps the raw blob
 * plus parsed SPS/PPS arrays.  Every per-NAL length is checked against
 * the remaining payload; any inconsistency frees everything via the
 * error path. */
static int MP4_ReadBox_avcC( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_avcC_t *p_avcC;
    int i;

    MP4_READBOX_ENTER( MP4_Box_data_avcC_t );
    p_avcC = p_box->data.p_avcC;

    p_avcC->i_avcC = i_read;
    if( p_avcC->i_avcC > 0 )
    {
        uint8_t * p = p_avcC->p_avcC = malloc( p_avcC->i_avcC );
        if( p )
            memcpy( p, p_peek, i_read );
    }

    MP4_GET1BYTE( p_avcC->i_version );
    MP4_GET1BYTE( p_avcC->i_profile );
    MP4_GET1BYTE( p_avcC->i_profile_compatibility );
    MP4_GET1BYTE( p_avcC->i_level );
    MP4_GET1BYTE( p_avcC->i_reserved1 );
    /* low 2 bits encode (NAL length field size - 1) */
    p_avcC->i_length_size = (p_avcC->i_reserved1&0x03) + 1;
    p_avcC->i_reserved1 >>= 2;

    MP4_GET1BYTE( p_avcC->i_reserved2 );
    /* low 5 bits: number of sequence parameter sets */
    p_avcC->i_sps = p_avcC->i_reserved2&0x1f;
    p_avcC->i_reserved2 >>= 5;

    if( p_avcC->i_sps > 0 )
    {
        p_avcC->i_sps_length = calloc( p_avcC->i_sps, sizeof( uint16_t ) );
        p_avcC->sps = calloc( p_avcC->i_sps, sizeof( uint8_t* ) );

        if( !p_avcC->i_sps_length || !p_avcC->sps )
            goto error;

        for( i = 0; i < p_avcC->i_sps && i_read > 2; i++ )
        {
            MP4_GET2BYTES( p_avcC->i_sps_length[i] );
            if ( p_avcC->i_sps_length[i] > i_read )
                goto error;
            p_avcC->sps[i] = malloc( p_avcC->i_sps_length[i] );
            if( p_avcC->sps[i] )
                memcpy( p_avcC->sps[i], p_peek, p_avcC->i_sps_length[i] );

            p_peek += p_avcC->i_sps_length[i];
            i_read -= p_avcC->i_sps_length[i];
        }
        /* ran out of data before the declared SPS count: malformed */
        if ( i != p_avcC->i_sps )
            goto error;
    }

    MP4_GET1BYTE( p_avcC->i_pps );
    if( p_avcC->i_pps > 0 )
    {
        p_avcC->i_pps_length = calloc( p_avcC->i_pps, sizeof( uint16_t ) );
        p_avcC->pps = calloc( p_avcC->i_pps, sizeof( uint8_t* ) );

        if( !p_avcC->i_pps_length || !p_avcC->pps )
            goto error;

        for( i = 0; i < p_avcC->i_pps && i_read > 2; i++ )
        {
            MP4_GET2BYTES( p_avcC->i_pps_length[i] );
            if( p_avcC->i_pps_length[i] > i_read )
                goto error;
            p_avcC->pps[i] = malloc( p_avcC->i_pps_length[i] );
            if( p_avcC->pps[i] )
                memcpy( p_avcC->pps[i], p_peek,
p_avcC->i_pps_length[i] );

            p_peek += p_avcC->i_pps_length[i];
            i_read -= p_avcC->i_pps_length[i];
        }
        /* ran out of data before the declared PPS count: malformed */
        if ( i != p_avcC->i_pps )
            goto error;
    }
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"avcC\" version=%d profile=0x%x level=0x%x length size=%d sps=%d pps=%d",
             p_avcC->i_version, p_avcC->i_profile, p_avcC->i_level,
             p_avcC->i_length_size, p_avcC->i_sps, p_avcC->i_pps );
    for( i = 0; i < p_avcC->i_sps; i++ )
    {
        msg_Dbg( p_stream, " - sps[%d] length=%d",
                 i, p_avcC->i_sps_length[i] );
    }
    for( i = 0; i < p_avcC->i_pps; i++ )
    {
        msg_Dbg( p_stream, " - pps[%d] length=%d",
                 i, p_avcC->i_pps_length[i] );
    }
#endif
    MP4_READBOX_EXIT( 1 );

error:
    MP4_FreeBox_avcC( p_box );
    MP4_READBOX_EXIT( 0 );
}

/* Read a "WMA2" box: WAVEFORMATEX-style fields (stored little endian)
 * for Windows Media Audio, followed by cbSize bytes of extradata. */
static int MP4_ReadBox_WMA2( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_WMA2_t );

    MP4_Box_data_WMA2_t *p_WMA2 = p_box->data.p_WMA2;

    MP4_GET2BYTESLE( p_WMA2->Format.wFormatTag );
    MP4_GET2BYTESLE( p_WMA2->Format.nChannels );
    MP4_GET4BYTESLE( p_WMA2->Format.nSamplesPerSec );
    MP4_GET4BYTESLE( p_WMA2->Format.nAvgBytesPerSec );
    MP4_GET2BYTESLE( p_WMA2->Format.nBlockAlign );
    MP4_GET2BYTESLE( p_WMA2->Format.wBitsPerSample );

    uint16_t i_cbSize;
    MP4_GET2BYTESLE( i_cbSize );

    /* declared extradata size must fit in what is left of the box */
    if ( i_read < 0 || i_cbSize > i_read )
        goto error;

    p_WMA2->i_extra = i_cbSize;
    if ( p_WMA2->i_extra )
    {
        p_WMA2->p_extra = malloc( p_WMA2->i_extra );
        if ( !
p_WMA2->p_extra )
            goto error;
        memcpy( p_WMA2->p_extra, p_peek, p_WMA2->i_extra );
    }

    MP4_READBOX_EXIT( 1 );

error:
    MP4_READBOX_EXIT( 0 );
}

static void MP4_FreeBox_WMA2( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_WMA2->p_extra );
}

/* Read a "strf" box: BITMAPINFOHEADER-style fields (stored little
 * endian) followed by the rest of the payload as codec extradata. */
static int MP4_ReadBox_strf( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_strf_t );

    MP4_Box_data_strf_t *p_strf = p_box->data.p_strf;

    MP4_GET4BYTESLE( p_strf->bmiHeader.biSize );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biWidth );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biHeight );
    MP4_GET2BYTESLE( p_strf->bmiHeader.biPlanes );
    MP4_GET2BYTESLE( p_strf->bmiHeader.biBitCount );
    MP4_GETFOURCC( p_strf->bmiHeader.biCompression );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biSizeImage );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biXPelsPerMeter );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biYPelsPerMeter );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biClrUsed );
    MP4_GET4BYTESLE( p_strf->bmiHeader.biClrImportant );

    /* the header reads above may have exhausted a short box */
    if ( i_read < 0 )
        goto error;

    p_strf->i_extra = i_read;
    if ( p_strf->i_extra )
    {
        p_strf->p_extra = malloc( p_strf->i_extra );
        if ( !
p_strf->p_extra ) goto error; memcpy( p_strf->p_extra, p_peek, i_read ); } MP4_READBOX_EXIT( 1 ); error: MP4_READBOX_EXIT( 0 ); } static void MP4_FreeBox_strf( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_strf->p_extra ); } static int MP4_ReadBox_ASF( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_ASF_t ); MP4_Box_data_ASF_t *p_asf = p_box->data.p_asf; if (i_read != 8) MP4_READBOX_EXIT( 0 ); MP4_GET1BYTE( p_asf->i_stream_number ); /* remaining is unknown */ MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_stsdext_chan( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_chan_t ); MP4_Box_data_chan_t *p_chan = p_box->data.p_chan; if ( i_read < 16 ) MP4_READBOX_EXIT( 0 ); MP4_GET1BYTE( p_chan->i_version ); MP4_GET3BYTES( p_chan->i_channels_flags ); MP4_GET4BYTES( p_chan->layout.i_channels_layout_tag ); MP4_GET4BYTES( p_chan->layout.i_channels_bitmap ); MP4_GET4BYTES( p_chan->layout.i_channels_description_count ); size_t i_descsize = 8 + 3 * sizeof(float); if ( (size_t)i_read < p_chan->layout.i_channels_description_count * i_descsize ) MP4_READBOX_EXIT( 0 ); p_chan->layout.p_descriptions = malloc( p_chan->layout.i_channels_description_count * i_descsize ); if ( !p_chan->layout.p_descriptions ) MP4_READBOX_EXIT( 0 ); uint32_t i; for( i=0; i<p_chan->layout.i_channels_description_count; i++ ) { if ( i_read < 20 ) break; MP4_GET4BYTES( p_chan->layout.p_descriptions[i].i_channel_label ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].i_channel_flags ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[0] ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[1] ); MP4_GET4BYTES( p_chan->layout.p_descriptions[i].f_coordinates[2] ); } if ( i<p_chan->layout.i_channels_description_count ) p_chan->layout.i_channels_description_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"chan\" flags=0x%x tag=0x%x bitmap=0x%x descriptions=%u", p_chan->i_channels_flags, p_chan->layout.i_channels_layout_tag, 
p_chan->layout.i_channels_bitmap, p_chan->layout.i_channels_description_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stsdext_chan( MP4_Box_t *p_box ) { MP4_Box_data_chan_t *p_chan = p_box->data.p_chan; free( p_chan->layout.p_descriptions ); } static int MP4_ReadBox_dec3( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_dec3_t ); MP4_Box_data_dec3_t *p_dec3 = p_box->data.p_dec3; unsigned i_header; MP4_GET2BYTES( i_header ); p_dec3->i_data_rate = i_header >> 3; p_dec3->i_num_ind_sub = (i_header & 0x7) + 1; for (uint8_t i = 0; i < p_dec3->i_num_ind_sub; i++) { MP4_GET3BYTES( i_header ); p_dec3->stream[i].i_fscod = ( i_header >> 22 ) & 0x03; p_dec3->stream[i].i_bsid = ( i_header >> 17 ) & 0x01f; p_dec3->stream[i].i_bsmod = ( i_header >> 12 ) & 0x01f; p_dec3->stream[i].i_acmod = ( i_header >> 9 ) & 0x07; p_dec3->stream[i].i_lfeon = ( i_header >> 8 ) & 0x01; p_dec3->stream[i].i_num_dep_sub = (i_header >> 1) & 0x0f; if (p_dec3->stream[i].i_num_dep_sub) { MP4_GET1BYTE( p_dec3->stream[i].i_chan_loc ); p_dec3->stream[i].i_chan_loc |= (i_header & 1) << 8; } else p_dec3->stream[i].i_chan_loc = 0; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"dec3\" bitrate %dkbps %d independant substreams", p_dec3->i_data_rate, p_dec3->i_num_ind_sub); for (uint8_t i = 0; i < p_dec3->i_num_ind_sub; i++) msg_Dbg( p_stream, "\tstream %d: bsid=0x%x bsmod=0x%x acmod=0x%x lfeon=0x%x " "num dependant subs=%d chan_loc=0x%x", i, p_dec3->stream[i].i_bsid, p_dec3->stream[i].i_bsmod, p_dec3->stream[i].i_acmod, p_dec3->stream[i].i_lfeon, p_dec3->stream[i].i_num_dep_sub, p_dec3->stream[i].i_chan_loc ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_dac3( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_Box_data_dac3_t *p_dac3; MP4_READBOX_ENTER( MP4_Box_data_dac3_t ); p_dac3 = p_box->data.p_dac3; unsigned i_header; MP4_GET3BYTES( i_header ); p_dac3->i_fscod = ( i_header >> 22 ) & 0x03; p_dac3->i_bsid = ( i_header >> 17 ) & 0x01f; p_dac3->i_bsmod = ( 
i_header >> 14 ) & 0x07;
    p_dac3->i_acmod = ( i_header >> 11 ) & 0x07;
    p_dac3->i_lfeon = ( i_header >> 10 ) & 0x01;
    p_dac3->i_bitrate_code = ( i_header >> 5) & 0x1f;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"dac3\" fscod=0x%x bsid=0x%x bsmod=0x%x acmod=0x%x lfeon=0x%x bitrate_code=0x%x",
             p_dac3->i_fscod, p_dac3->i_bsid, p_dac3->i_bsmod,
             p_dac3->i_acmod, p_dac3->i_lfeon, p_dac3->i_bitrate_code );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a "dvc1" box (VC-1 configuration): a profile/level byte
 * followed by the raw VC-1 sequence header blob. */
static int MP4_ReadBox_dvc1( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_dvc1_t *p_dvc1;

    MP4_READBOX_ENTER( MP4_Box_data_dvc1_t );
    p_dvc1 = p_box->data.p_dvc1;

    MP4_GET1BYTE( p_dvc1->i_profile_level ); /* profile is on 4bits, level 3bits */
    uint8_t i_profile = (p_dvc1->i_profile_level & 0xf0) >> 4;

    /* only simple/main (0x06) and advanced (0x0c) profiles are handled */
    if( i_profile != 0x06 && i_profile != 0x0c )
    {
        msg_Warn( p_stream, "unsupported VC-1 profile (%"PRIu8"), please report", i_profile );
        MP4_READBOX_EXIT( 0 );
    }

    p_dvc1->i_vc1 = p_box->i_size - 7; /* Header + profile_level */

    /* NOTE(review): allocation is sized from i_size while the copy uses
     * i_read; this assumes i_read <= i_size - 7 (true for standard box
     * headers) — verify against the MP4_READBOX_ENTER accounting. */
    if( p_dvc1->i_vc1 > 0 )
    {
        uint8_t *p = p_dvc1->p_vc1 = malloc( p_dvc1->i_vc1 );
        if( p )
            memcpy( p, p_peek, i_read );
    }

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"dvc1\" profile=%"PRIu8" level=%i",
             i_profile, p_dvc1->i_profile_level & 0x0e >> 1 );
#endif

    MP4_READBOX_EXIT( 1 );
}

/* Read an "enda" box: PCM endianness flag (QuickTime extension). */
static int MP4_ReadBox_enda( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_Box_data_enda_t *p_enda;
    MP4_READBOX_ENTER( MP4_Box_data_enda_t );

    p_enda = p_box->data.p_enda;

    MP4_GET2BYTES( p_enda->i_little_endian );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"enda\" little_endian=%d", p_enda->i_little_endian );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a sound sample description ("soun" handler) from an stsd box. */
static int MP4_ReadBox_sample_soun( stream_t *p_stream, MP4_Box_t *p_box )
{
    p_box->i_handler = ATOM_soun;
    MP4_READBOX_ENTER( MP4_Box_data_sample_soun_t );
    p_box->data.p_sample_soun->p_qt_description = NULL;

    /* Sanity check needed because the "wave" box does also contain an
     * "mp4a" box that we don't understand.
*/
    if( i_read < 28 )
    {
        i_read -= 30;
        MP4_READBOX_EXIT( 1 );
    }

    for( unsigned i = 0; i < 6 ; i++ )
    {
        MP4_GET1BYTE( p_box->data.p_sample_soun->i_reserved1[i] );
    }

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_data_reference_index );

    /*
     * XXX hack -> produce a copy of the nearly complete chunk
     */
    p_box->data.p_sample_soun->i_qt_description = 0;
    p_box->data.p_sample_soun->p_qt_description = NULL;
    if( i_read > 0 )
    {
        p_box->data.p_sample_soun->p_qt_description = malloc( i_read );
        if( p_box->data.p_sample_soun->p_qt_description )
        {
            p_box->data.p_sample_soun->i_qt_description = i_read;
            memcpy( p_box->data.p_sample_soun->p_qt_description,
                    p_peek, i_read );
        }
    }

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_version );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_qt_revision_level );
    MP4_GET4BYTES( p_box->data.p_sample_soun->i_qt_vendor );

    MP4_GET2BYTES( p_box->data.p_sample_soun->i_channelcount );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_samplesize );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_compressionid );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_reserved3 );
    /* sample rate is a 16.16 fixed-point value: hi.lo */
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratehi );
    MP4_GET2BYTES( p_box->data.p_sample_soun->i_sampleratelo );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream,
             "read box: \"soun\" stsd qt_version %"PRIu16" compid=%"PRIx16,
             p_box->data.p_sample_soun->i_qt_version,
             p_box->data.p_sample_soun->i_compressionid );
#endif
    if( p_box->data.p_sample_soun->i_qt_version == 1 && i_read >= 16 )
    {
        /* SoundDescriptionV1 */
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_sample_per_packet );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_packet );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_frame );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_bytes_per_sample );

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream,
                 "read box: \"soun\" V1 sample/packet=%d bytes/packet=%d "
                 "bytes/frame=%d bytes/sample=%d",
                 p_box->data.p_sample_soun->i_sample_per_packet,
                 p_box->data.p_sample_soun->i_bytes_per_packet,
p_box->data.p_sample_soun->i_bytes_per_frame,
                 p_box->data.p_sample_soun->i_bytes_per_sample );
#endif
        stream_Seek( p_stream, p_box->i_pos +
                        mp4_box_headersize( p_box ) + 44 );
    }
    else if( p_box->data.p_sample_soun->i_qt_version == 2 && i_read >= 36 )
    {
        /* SoundDescriptionV2 */
        double f_sample_rate;
        int64_t i_dummy64;
        uint32_t i_channel, i_extoffset, i_dummy32;

        /* Checks: V2 requires fixed sentinel values in the V0 fields */
        if ( p_box->data.p_sample_soun->i_channelcount != 0x3 ||
             p_box->data.p_sample_soun->i_samplesize != 0x0010 ||
             p_box->data.p_sample_soun->i_compressionid != 0xFFFE ||
             p_box->data.p_sample_soun->i_reserved3 != 0x0 ||
             p_box->data.p_sample_soun->i_sampleratehi != 0x1 ||//65536
             p_box->data.p_sample_soun->i_sampleratelo != 0x0 ) //remainder
        {
            msg_Err( p_stream, "invalid stsd V2 box defaults" );
            MP4_READBOX_EXIT( 0 );
        }
        /* !Checks */

        MP4_GET4BYTES( i_extoffset ); /* offset to stsd extentions */
        MP4_GET8BYTES( i_dummy64 );
        /* the V2 sample rate is stored as an IEEE-754 double */
        memcpy( &f_sample_rate, &i_dummy64, 8 );
        msg_Dbg( p_stream, "read box: %f Hz", f_sample_rate );
        /* repack into the 16.16 hi/lo pair — NOTE(review): hi takes
         * rate % 65536 (integer part for rates below 65536 Hz); verify
         * intent for higher rates. */
        p_box->data.p_sample_soun->i_sampleratehi = (int)f_sample_rate % BLOCK16x16;
        p_box->data.p_sample_soun->i_sampleratelo = f_sample_rate / BLOCK16x16;

        MP4_GET4BYTES( i_channel );
        p_box->data.p_sample_soun->i_channelcount = i_channel;

        MP4_GET4BYTES( i_dummy32 );
        /* constant sentinel required by the spec */
        if ( i_dummy32 != 0x7F000000 )
        {
            msg_Err( p_stream, "invalid stsd V2 box" );
            MP4_READBOX_EXIT( 0 );
        }

        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbitsperchannel );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_formatflags );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constbytesperaudiopacket );
        MP4_GET4BYTES( p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket );

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream,
                 "read box: \"soun\" V2 rate=%f bitsperchannel=%u "
                 "flags=%u bytesperpacket=%u lpcmframesperpacket=%u",
                 f_sample_rate,
                 p_box->data.p_sample_soun->i_constbitsperchannel,
                 p_box->data.p_sample_soun->i_formatflags,
                 p_box->data.p_sample_soun->i_constbytesperaudiopacket,
                 p_box->data.p_sample_soun->i_constLPCMframesperaudiopacket );
#endif
        /* jump to the stsd extensions, bounded by the box size */
        if ( i_extoffset < p_box->i_size )
            stream_Seek( p_stream, p_box->i_pos + i_extoffset );
        else
            stream_Seek( p_stream, p_box->i_pos + p_box->i_size );
    }
    else
    {
        p_box->data.p_sample_soun->i_sample_per_packet = 0;
        p_box->data.p_sample_soun->i_bytes_per_packet = 0;
        p_box->data.p_sample_soun->i_bytes_per_frame = 0;
        p_box->data.p_sample_soun->i_bytes_per_sample = 0;

#ifdef MP4_VERBOSE
        msg_Dbg( p_stream, "read box: \"soun\" V0 or qt1/2 (rest=%"PRId64")",
                 i_read );
#endif
        stream_Seek( p_stream, p_box->i_pos +
                        mp4_box_headersize( p_box ) + 28 );
    }

    if( p_box->i_type == ATOM_drms )
    {
        msg_Warn( p_stream, "DRM protected streams are not supported." );
        MP4_READBOX_EXIT( 0 );
    }

    if( p_box->i_type == ATOM_samr || p_box->i_type == ATOM_sawb )
    {
        /* Ignore channelcount for AMR (3gpp AMRSpecificBox) */
        p_box->data.p_sample_soun->i_channelcount = 1;
    }

    /* Loads extensions */
    MP4_ReadBoxContainerRaw( p_stream, p_box ); /* esds/wave/... */

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"soun\" in stsd channel %d "
             "sample size %d sample rate %f",
             p_box->data.p_sample_soun->i_channelcount,
             p_box->data.p_sample_soun->i_samplesize,
             (float)p_box->data.p_sample_soun->i_sampleratehi +
             (float)p_box->data.p_sample_soun->i_sampleratelo / BLOCK16x16 );

#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_sample_soun( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sample_soun->p_qt_description );
}

/* Read a video sample description ("vide" handler) from an stsd box. */
int MP4_ReadBox_sample_vide( stream_t *p_stream, MP4_Box_t *p_box )
{
    p_box->i_handler = ATOM_vide;
    MP4_READBOX_ENTER( MP4_Box_data_sample_vide_t );

    for( unsigned i = 0; i < 6 ; i++ )
    {
        MP4_GET1BYTE( p_box->data.p_sample_vide->i_reserved1[i] );
    }

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_data_reference_index );

    /*
     * XXX hack -> produce a copy of the nearly complete chunk
     */
    if( i_read > 0 )
    {
        p_box->data.p_sample_vide->p_qt_image_description = malloc( i_read );
        if( unlikely( p_box->data.p_sample_vide->p_qt_image_description == NULL ) )
            MP4_READBOX_EXIT( 0 );
p_box->data.p_sample_vide->i_qt_image_description = i_read;
        memcpy( p_box->data.p_sample_vide->p_qt_image_description,
                p_peek, i_read );
    }
    else
    {
        p_box->data.p_sample_vide->i_qt_image_description = 0;
        p_box->data.p_sample_vide->p_qt_image_description = NULL;
    }

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_version );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_revision_level );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_vendor );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_temporal_quality );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_spatial_quality );

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_width );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_height );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_horizresolution );
    MP4_GET4BYTES( p_box->data.p_sample_vide->i_vertresolution );

    MP4_GET4BYTES( p_box->data.p_sample_vide->i_qt_data_size );
    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_frame_count );

    /* compressor name is a fixed 32-byte field; bail if it is missing */
    if ( i_read < 32 )
        MP4_READBOX_EXIT( 0 );
    memcpy( &p_box->data.p_sample_vide->i_compressorname, p_peek, 32 );
    p_peek += 32; i_read -= 32;

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_depth );

    MP4_GET2BYTES( p_box->data.p_sample_vide->i_qt_color_table );

    /* 78 = fixed size of the sample description header read above */
    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 78);

    if( p_box->i_type == ATOM_drmi )
    {
        msg_Warn( p_stream, "DRM protected streams are not supported."
);
        MP4_READBOX_EXIT( 0 );
    }

    MP4_ReadBoxContainerRaw( p_stream, p_box );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"vide\" in stsd %dx%d depth %d",
             p_box->data.p_sample_vide->i_width,
             p_box->data.p_sample_vide->i_height,
             p_box->data.p_sample_vide->i_depth );
#endif
    MP4_READBOX_EXIT( 1 );
}

void MP4_FreeBox_sample_vide( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sample_vide->p_qt_image_description );
}

/* "mp4s" sample entries are plain containers of child boxes. */
static int MP4_ReadBox_sample_mp4s( stream_t *p_stream, MP4_Box_t *p_box )
{
    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 );
    MP4_ReadBoxContainerRaw( p_stream, p_box );
    return 1;
}

/* Read a QuickTime "text" sample description. */
static int MP4_ReadBox_sample_text( stream_t *p_stream, MP4_Box_t *p_box )
{
    int32_t t;

    p_box->i_handler = ATOM_text;
    MP4_READBOX_ENTER( MP4_Box_data_sample_text_t );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved1 );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_reserved2 );

    MP4_GET2BYTES( p_box->data.p_sample_text->i_data_reference_index );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_display_flags );

    MP4_GET4BYTES( t );
    switch( t )
    {
        /* FIXME search right signification */
        case 1: // Center
            p_box->data.p_sample_text->i_justification_horizontal = 1;
            p_box->data.p_sample_text->i_justification_vertical = 1;
            break;
        case -1: // Flush Right
            p_box->data.p_sample_text->i_justification_horizontal = -1;
            p_box->data.p_sample_text->i_justification_vertical = -1;
            break;
        case -2: // Flush Left
            p_box->data.p_sample_text->i_justification_horizontal = 0;
            p_box->data.p_sample_text->i_justification_vertical = 0;
            break;
        case 0: // Flush Default
        default:
            p_box->data.p_sample_text->i_justification_horizontal = 1;
            p_box->data.p_sample_text->i_justification_vertical = -1;
            break;
    }

    /* RGB background; alpha is forced opaque */
    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[0] );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[1] );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_background_color[2] );
    p_box->data.p_sample_text->i_background_color[3] = 0xFF;

    MP4_GET2BYTES(
p_box->data.p_sample_text->i_text_box_top );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_left );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_bottom );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_right );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"text\" in stsd text" );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Read a 3GPP "tx3g" (timed text) sample description. */
static int MP4_ReadBox_sample_tx3g( stream_t *p_stream, MP4_Box_t *p_box )
{
    p_box->i_handler = ATOM_text;
    MP4_READBOX_ENTER( MP4_Box_data_sample_text_t );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved1 );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_reserved2 );

    MP4_GET2BYTES( p_box->data.p_sample_text->i_data_reference_index );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_display_flags );

    MP4_GET1BYTE ( p_box->data.p_sample_text->i_justification_horizontal );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_justification_vertical );

    /* RGBA background, one byte per component (unlike "text") */
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[0] );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[1] );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[2] );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_background_color[3] );

    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_top );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_left );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_bottom );
    MP4_GET2BYTES( p_box->data.p_sample_text->i_text_box_right );

    MP4_GET4BYTES( p_box->data.p_sample_text->i_reserved3 );

    MP4_GET2BYTES( p_box->data.p_sample_text->i_font_id );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_font_face );
    MP4_GET1BYTE ( p_box->data.p_sample_text->i_font_size );
    MP4_GET4BYTES( p_box->data.p_sample_text->i_font_color );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"tx3g\" in stsd text" );
#endif
    MP4_READBOX_EXIT( 1 );
}


#if 0
/* We can't easily call it, and anyway ~ 20 bytes lost isn't a real problem */
static void MP4_FreeBox_sample_text( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_sample_text->psz_text_name );
}
#endif

static
int MP4_ReadBox_stsd( stream_t *p_stream, MP4_Box_t *p_box )
{
    /* Sample description box: full-box header, then a raw container of
     * per-codec sample entries. */
    MP4_READBOX_ENTER( MP4_Box_data_stsd_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsd );
    MP4_GET4BYTES( p_box->data.p_stsd->i_entry_count );

    stream_Seek( p_stream, p_box->i_pos + mp4_box_headersize( p_box ) + 8 );

    MP4_ReadBoxContainerRaw( p_stream, p_box );

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"stsd\" entry-count %d",
                      p_box->data.p_stsd->i_entry_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

/* Sample size table: either one fixed size, or one 32-bit entry per sample. */
static int MP4_ReadBox_stsz( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stsz_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsz );

    MP4_GET4BYTES( p_box->data.p_stsz->i_sample_size );
    MP4_GET4BYTES( p_box->data.p_stsz->i_sample_count );

    if( p_box->data.p_stsz->i_sample_size == 0 )
    {
        /* A zero default size means a 4-byte entry per sample follows.
         * The count is attacker-controlled: reject counts larger than the
         * remaining payload can hold, which would otherwise drive a
         * multi-gigabyte calloc() (denial of service). */
        if( p_box->data.p_stsz->i_sample_count > i_read / 4 )
            MP4_READBOX_EXIT( 0 );

        p_box->data.p_stsz->i_entry_size =
            calloc( p_box->data.p_stsz->i_sample_count, sizeof(uint32_t) );
        if( unlikely( !p_box->data.p_stsz->i_entry_size ) )
            MP4_READBOX_EXIT( 0 );

        for( unsigned int i = 0; (i<p_box->data.p_stsz->i_sample_count)&&(i_read >= 4 ); i++ )
        {
            MP4_GET4BYTES( p_box->data.p_stsz->i_entry_size[i] );
        }
    }
    else
        p_box->data.p_stsz->i_entry_size = NULL;

#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "read box: \"stsz\" sample-size %d sample-count %d",
                      p_box->data.p_stsz->i_sample_size,
                      p_box->data.p_stsz->i_sample_count );
#endif
    MP4_READBOX_EXIT( 1 );
}

static void MP4_FreeBox_stsz( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_stsz->i_entry_size );
}

static void MP4_FreeBox_stsc( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_stsc->i_first_chunk );
    FREENULL( p_box->data.p_stsc->i_samples_per_chunk );
    FREENULL( p_box->data.p_stsc->i_sample_description_index );
}

/* Sample-to-chunk mapping table (12 bytes per entry). */
static int MP4_ReadBox_stsc( stream_t *p_stream, MP4_Box_t *p_box )
{
    MP4_READBOX_ENTER( MP4_Box_data_stsc_t );

    MP4_GETVERSIONFLAGS( p_box->data.p_stsc );
    MP4_GET4BYTES( p_box->data.p_stsc->i_entry_count );

    /* Clamp the declared count to what the payload actually contains so a
     * forged header cannot trigger oversized allocations; the read loop
     * below stops at i_read anyway, so only non-existent entries are
     * dropped, and i_entry_count stays consistent with the filled arrays. */
    if( p_box->data.p_stsc->i_entry_count > i_read / 12 )
        p_box->data.p_stsc->i_entry_count = i_read / 12;

    p_box->data.p_stsc->i_first_chunk =
        calloc( p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) );
    p_box->data.p_stsc->i_samples_per_chunk = calloc(
p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) ); p_box->data.p_stsc->i_sample_description_index = calloc( p_box->data.p_stsc->i_entry_count, sizeof(uint32_t) ); if( unlikely( p_box->data.p_stsc->i_first_chunk == NULL || p_box->data.p_stsc->i_samples_per_chunk == NULL || p_box->data.p_stsc->i_sample_description_index == NULL ) ) { MP4_READBOX_EXIT( 0 ); } for( unsigned int i = 0; (i < p_box->data.p_stsc->i_entry_count )&&( i_read >= 12 );i++ ) { MP4_GET4BYTES( p_box->data.p_stsc->i_first_chunk[i] ); MP4_GET4BYTES( p_box->data.p_stsc->i_samples_per_chunk[i] ); MP4_GET4BYTES( p_box->data.p_stsc->i_sample_description_index[i] ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stsc\" entry-count %d", p_box->data.p_stsc->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_stco_co64( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_co64_t ); MP4_GETVERSIONFLAGS( p_box->data.p_co64 ); MP4_GET4BYTES( p_box->data.p_co64->i_entry_count ); p_box->data.p_co64->i_chunk_offset = calloc( p_box->data.p_co64->i_entry_count, sizeof(uint64_t) ); if( p_box->data.p_co64->i_chunk_offset == NULL ) MP4_READBOX_EXIT( 0 ); for( unsigned int i = 0; i < p_box->data.p_co64->i_entry_count; i++ ) { if( p_box->i_type == ATOM_stco ) { if( i_read < 4 ) { break; } MP4_GET4BYTES( p_box->data.p_co64->i_chunk_offset[i] ); } else { if( i_read < 8 ) { break; } MP4_GET8BYTES( p_box->data.p_co64->i_chunk_offset[i] ); } } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"co64\" entry-count %d", p_box->data.p_co64->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stco_co64( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_co64->i_chunk_offset ); } static int MP4_ReadBox_stss( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stss_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stss ); MP4_GET4BYTES( p_box->data.p_stss->i_entry_count ); p_box->data.p_stss->i_sample_number = calloc( p_box->data.p_stss->i_entry_count, 
sizeof(uint32_t) ); if( unlikely( p_box->data.p_stss->i_sample_number == NULL ) ) MP4_READBOX_EXIT( 0 ); unsigned int i; for( i = 0; (i < p_box->data.p_stss->i_entry_count )&&( i_read >= 4 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stss->i_sample_number[i] ); /* XXX in libmp4 sample begin at 0 */ p_box->data.p_stss->i_sample_number[i]--; } if ( i < p_box->data.p_stss->i_entry_count ) p_box->data.p_stss->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stss\" entry-count %d", p_box->data.p_stss->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stss( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stss->i_sample_number ); } static void MP4_FreeBox_stsh( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stsh->i_shadowed_sample_number ); FREENULL( p_box->data.p_stsh->i_sync_sample_number ); } static int MP4_ReadBox_stsh( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stsh_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stsh ); MP4_GET4BYTES( p_box->data.p_stsh->i_entry_count ); p_box->data.p_stsh->i_shadowed_sample_number = calloc( p_box->data.p_stsh->i_entry_count, sizeof(uint32_t) ); p_box->data.p_stsh->i_sync_sample_number = calloc( p_box->data.p_stsh->i_entry_count, sizeof(uint32_t) ); if( p_box->data.p_stsh->i_shadowed_sample_number == NULL || p_box->data.p_stsh->i_sync_sample_number == NULL ) { MP4_READBOX_EXIT( 0 ); } unsigned i; for( i = 0; (i < p_box->data.p_stss->i_entry_count )&&( i_read >= 8 ); i++ ) { MP4_GET4BYTES( p_box->data.p_stsh->i_shadowed_sample_number[i] ); MP4_GET4BYTES( p_box->data.p_stsh->i_sync_sample_number[i] ); } if ( i < p_box->data.p_stss->i_entry_count ) p_box->data.p_stss->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stsh\" entry-count %d", p_box->data.p_stsh->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_stdp( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_stdp_t ); MP4_GETVERSIONFLAGS( p_box->data.p_stdp ); 
p_box->data.p_stdp->i_priority = calloc( i_read / 2, sizeof(uint16_t) ); if( unlikely( !p_box->data.p_stdp->i_priority ) ) MP4_READBOX_EXIT( 0 ); for( unsigned i = 0; i < i_read / 2 ; i++ ) { MP4_GET2BYTES( p_box->data.p_stdp->i_priority[i] ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stdp\" entry-count %"PRId64, i_read / 2 ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_stdp( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_stdp->i_priority ); } static void MP4_FreeBox_padb( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_padb->i_reserved1 ); FREENULL( p_box->data.p_padb->i_pad2 ); FREENULL( p_box->data.p_padb->i_reserved2 ); FREENULL( p_box->data.p_padb->i_pad1 ); } static int MP4_ReadBox_padb( stream_t *p_stream, MP4_Box_t *p_box ) { uint32_t count; MP4_READBOX_ENTER( MP4_Box_data_padb_t ); MP4_GETVERSIONFLAGS( p_box->data.p_padb ); MP4_GET4BYTES( p_box->data.p_padb->i_sample_count ); count = (p_box->data.p_padb->i_sample_count + 1) / 2; p_box->data.p_padb->i_reserved1 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_pad2 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_reserved2 = calloc( count, sizeof(uint16_t) ); p_box->data.p_padb->i_pad1 = calloc( count, sizeof(uint16_t) ); if( p_box->data.p_padb->i_reserved1 == NULL || p_box->data.p_padb->i_pad2 == NULL || p_box->data.p_padb->i_reserved2 == NULL || p_box->data.p_padb->i_pad1 == NULL ) { MP4_READBOX_EXIT( 0 ); } for( unsigned int i = 0; i < i_read / 2 ; i++ ) { if( i >= count ) { MP4_READBOX_EXIT( 0 ); } p_box->data.p_padb->i_reserved1[i] = ( (*p_peek) >> 7 )&0x01; p_box->data.p_padb->i_pad2[i] = ( (*p_peek) >> 4 )&0x07; p_box->data.p_padb->i_reserved1[i] = ( (*p_peek) >> 3 )&0x01; p_box->data.p_padb->i_pad1[i] = ( (*p_peek) )&0x07; p_peek += 1; i_read -= 1; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"stdp\" entry-count %"PRId64, i_read / 2 ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_elst( MP4_Box_t *p_box ) { FREENULL( 
p_box->data.p_elst->i_segment_duration ); FREENULL( p_box->data.p_elst->i_media_time ); FREENULL( p_box->data.p_elst->i_media_rate_integer ); FREENULL( p_box->data.p_elst->i_media_rate_fraction ); } static int MP4_ReadBox_elst( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_elst_t ); MP4_GETVERSIONFLAGS( p_box->data.p_elst ); MP4_GET4BYTES( p_box->data.p_elst->i_entry_count ); p_box->data.p_elst->i_segment_duration = calloc( p_box->data.p_elst->i_entry_count, sizeof(uint64_t) ); p_box->data.p_elst->i_media_time = calloc( p_box->data.p_elst->i_entry_count, sizeof(int64_t) ); p_box->data.p_elst->i_media_rate_integer = calloc( p_box->data.p_elst->i_entry_count, sizeof(uint16_t) ); p_box->data.p_elst->i_media_rate_fraction = calloc( p_box->data.p_elst->i_entry_count, sizeof(uint16_t) ); if( p_box->data.p_elst->i_segment_duration == NULL || p_box->data.p_elst->i_media_time == NULL || p_box->data.p_elst->i_media_rate_integer == NULL || p_box->data.p_elst->i_media_rate_fraction == NULL ) { MP4_READBOX_EXIT( 0 ); } unsigned i; for( i = 0; i < p_box->data.p_elst->i_entry_count; i++ ) { if( p_box->data.p_elst->i_version == 1 ) { if ( i_read < 20 ) break; MP4_GET8BYTES( p_box->data.p_elst->i_segment_duration[i] ); MP4_GET8BYTES( p_box->data.p_elst->i_media_time[i] ); } else { if ( i_read < 12 ) break; MP4_GET4BYTES( p_box->data.p_elst->i_segment_duration[i] ); MP4_GET4BYTES( p_box->data.p_elst->i_media_time[i] ); p_box->data.p_elst->i_media_time[i] = (int32_t)p_box->data.p_elst->i_media_time[i]; } MP4_GET2BYTES( p_box->data.p_elst->i_media_rate_integer[i] ); MP4_GET2BYTES( p_box->data.p_elst->i_media_rate_fraction[i] ); } if ( i < p_box->data.p_elst->i_entry_count ) p_box->data.p_elst->i_entry_count = i; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"elst\" entry-count %lu", (unsigned long)p_box->data.p_elst->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_cprt( stream_t *p_stream, MP4_Box_t *p_box ) { uint16_t i_language; 
bool b_mac; MP4_READBOX_ENTER( MP4_Box_data_cprt_t ); MP4_GETVERSIONFLAGS( p_box->data.p_cprt ); MP4_GET2BYTES( i_language ); decodeQtLanguageCode( i_language, p_box->data.p_cprt->rgs_language, &b_mac ); MP4_GETSTRINGZ( p_box->data.p_cprt->psz_notice ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"cprt\" language %3.3s notice %s", p_box->data.p_cprt->rgs_language, p_box->data.p_cprt->psz_notice ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_cprt( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_cprt->psz_notice ); } static int MP4_ReadBox_dcom( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_dcom_t ); MP4_GETFOURCC( p_box->data.p_dcom->i_algorithm ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"dcom\" compression algorithm : %4.4s", (char*)&p_box->data.p_dcom->i_algorithm ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_cmvd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_cmvd_t ); MP4_GET4BYTES( p_box->data.p_cmvd->i_uncompressed_size ); p_box->data.p_cmvd->i_compressed_size = i_read; if( !( p_box->data.p_cmvd->p_data = malloc( i_read ) ) ) MP4_READBOX_EXIT( 0 ); /* now copy compressed data */ memcpy( p_box->data.p_cmvd->p_data, p_peek,i_read); p_box->data.p_cmvd->b_compressed = 1; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"cmvd\" compressed data size %d", p_box->data.p_cmvd->i_compressed_size ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_cmvd( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_cmvd->p_data ); } static int MP4_ReadBox_cmov( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_Box_t *p_dcom; MP4_Box_t *p_cmvd; #ifdef HAVE_ZLIB_H stream_t *p_stream_memory; z_stream z_data; uint8_t *p_data; int i_result; #endif if( !( p_box->data.p_cmov = calloc(1, sizeof( MP4_Box_data_cmov_t ) ) ) ) return 0; if( !p_box->p_father || ( p_box->p_father->i_type != ATOM_moov && p_box->p_father->i_type != ATOM_foov ) ) { msg_Warn( p_stream, "Read box: \"cmov\" box alone" ); return 1; } 
if( !MP4_ReadBoxContainer( p_stream, p_box ) ) { return 0; } if( ( p_dcom = MP4_BoxGet( p_box, "dcom" ) ) == NULL || ( p_cmvd = MP4_BoxGet( p_box, "cmvd" ) ) == NULL || p_cmvd->data.p_cmvd->p_data == NULL ) { msg_Warn( p_stream, "read box: \"cmov\" incomplete" ); return 0; } if( p_dcom->data.p_dcom->i_algorithm != ATOM_zlib ) { msg_Dbg( p_stream, "read box: \"cmov\" compression algorithm : %4.4s " "not supported", (char*)&p_dcom->data.p_dcom->i_algorithm ); return 0; } #ifndef HAVE_ZLIB_H msg_Dbg( p_stream, "read box: \"cmov\" zlib unsupported" ); return 0; #else /* decompress data */ /* allocate a new buffer */ if( !( p_data = malloc( p_cmvd->data.p_cmvd->i_uncompressed_size ) ) ) return 0; /* init default structures */ z_data.next_in = p_cmvd->data.p_cmvd->p_data; z_data.avail_in = p_cmvd->data.p_cmvd->i_compressed_size; z_data.next_out = p_data; z_data.avail_out = p_cmvd->data.p_cmvd->i_uncompressed_size; z_data.zalloc = (alloc_func)Z_NULL; z_data.zfree = (free_func)Z_NULL; z_data.opaque = (voidpf)Z_NULL; /* init zlib */ if( inflateInit( &z_data ) != Z_OK ) { msg_Err( p_stream, "read box: \"cmov\" error while uncompressing" ); free( p_data ); return 0; } /* uncompress */ i_result = inflate( &z_data, Z_NO_FLUSH ); if( i_result != Z_OK && i_result != Z_STREAM_END ) { msg_Err( p_stream, "read box: \"cmov\" error while uncompressing" ); free( p_data ); return 0; } if( p_cmvd->data.p_cmvd->i_uncompressed_size != z_data.total_out ) { msg_Warn( p_stream, "read box: \"cmov\" uncompressing data size " "mismatch" ); } p_cmvd->data.p_cmvd->i_uncompressed_size = z_data.total_out; /* close zlib */ if( inflateEnd( &z_data ) != Z_OK ) { msg_Warn( p_stream, "read box: \"cmov\" error while uncompressing " "data (ignored)" ); } free( p_cmvd->data.p_cmvd->p_data ); p_cmvd->data.p_cmvd->p_data = p_data; p_cmvd->data.p_cmvd->b_compressed = 0; msg_Dbg( p_stream, "read box: \"cmov\" box successfully uncompressed" ); /* now create a memory stream */ p_stream_memory = stream_MemoryNew( 
VLC_OBJECT(p_stream), p_cmvd->data.p_cmvd->p_data, p_cmvd->data.p_cmvd->i_uncompressed_size, true ); /* and read uncompressd moov */ p_box->data.p_cmov->p_moov = MP4_ReadBox( p_stream_memory, NULL ); stream_Delete( p_stream_memory ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"cmov\" compressed movie header completed"); #endif return p_box->data.p_cmov->p_moov ? 1 : 0; #endif /* HAVE_ZLIB_H */ } static int MP4_ReadBox_rdrf( stream_t *p_stream, MP4_Box_t *p_box ) { uint32_t i_len; MP4_READBOX_ENTER( MP4_Box_data_rdrf_t ); MP4_GETVERSIONFLAGS( p_box->data.p_rdrf ); MP4_GETFOURCC( p_box->data.p_rdrf->i_ref_type ); MP4_GET4BYTES( i_len ); i_len++; if( i_len > 0 ) { p_box->data.p_rdrf->psz_ref = malloc( i_len ); if( p_box->data.p_rdrf->psz_ref == NULL ) MP4_READBOX_EXIT( 0 ); i_len--; for( unsigned i = 0; i < i_len; i++ ) { MP4_GET1BYTE( p_box->data.p_rdrf->psz_ref[i] ); } p_box->data.p_rdrf->psz_ref[i_len] = '\0'; } else { p_box->data.p_rdrf->psz_ref = NULL; } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"rdrf\" type:%4.4s ref %s", (char*)&p_box->data.p_rdrf->i_ref_type, p_box->data.p_rdrf->psz_ref ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_rdrf( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_rdrf->psz_ref ); } static int MP4_ReadBox_rmdr( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_rmdr_t ); MP4_GETVERSIONFLAGS( p_box->data.p_rmdr ); MP4_GET4BYTES( p_box->data.p_rmdr->i_rate ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"rmdr\" rate:%d", p_box->data.p_rmdr->i_rate ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_rmqu( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_rmqu_t ); MP4_GET4BYTES( p_box->data.p_rmqu->i_quality ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"rmqu\" quality:%d", p_box->data.p_rmqu->i_quality ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_rmvc( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_rmvc_t ); 
MP4_GETVERSIONFLAGS( p_box->data.p_rmvc ); MP4_GETFOURCC( p_box->data.p_rmvc->i_gestaltType ); MP4_GET4BYTES( p_box->data.p_rmvc->i_val1 ); MP4_GET4BYTES( p_box->data.p_rmvc->i_val2 ); MP4_GET2BYTES( p_box->data.p_rmvc->i_checkType ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"rmvc\" gestaltType:%4.4s val1:0x%x val2:0x%x checkType:0x%x", (char*)&p_box->data.p_rmvc->i_gestaltType, p_box->data.p_rmvc->i_val1,p_box->data.p_rmvc->i_val2, p_box->data.p_rmvc->i_checkType ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_frma( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_frma_t ); MP4_GETFOURCC( p_box->data.p_frma->i_type ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"frma\" i_type:%4.4s", (char *)&p_box->data.p_frma->i_type ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_skcr( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_skcr_t ); MP4_GET4BYTES( p_box->data.p_skcr->i_init ); MP4_GET4BYTES( p_box->data.p_skcr->i_encr ); MP4_GET4BYTES( p_box->data.p_skcr->i_decr ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"skcr\" i_init:%d i_encr:%d i_decr:%d", p_box->data.p_skcr->i_init, p_box->data.p_skcr->i_encr, p_box->data.p_skcr->i_decr ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_drms( stream_t *p_stream, MP4_Box_t *p_box ) { VLC_UNUSED(p_box); /* ATOMs 'user', 'key', 'iviv', and 'priv' will be skipped, * so unless data decrypt itself by magic, there will be no playback, * but we never know... */ msg_Warn( p_stream, "DRM protected streams are not supported." 
); return 1; } static int MP4_ReadBox_String( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_string_t ); if( p_box->i_size < 8 || p_box->i_size > SIZE_MAX ) MP4_READBOX_EXIT( 0 ); p_box->data.p_string->psz_text = malloc( p_box->i_size + 1 - 8 ); /* +\0, -name, -size */ if( p_box->data.p_string->psz_text == NULL ) MP4_READBOX_EXIT( 0 ); memcpy( p_box->data.p_string->psz_text, p_peek, p_box->i_size - 8 ); p_box->data.p_string->psz_text[p_box->i_size - 8] = '\0'; #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"%4.4s\" text=`%s'", (char *) & p_box->i_type, p_box->data.p_string->psz_text ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_String( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_string->psz_text ); } static int MP4_ReadBox_Binary( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_binary_t ); i_read = __MIN( i_read, UINT32_MAX ); if ( i_read > 0 ) { p_box->data.p_binary->p_blob = malloc( i_read ); if ( p_box->data.p_binary->p_blob ) { memcpy( p_box->data.p_binary->p_blob, p_peek, i_read ); p_box->data.p_binary->i_blob = i_read; } } MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_Binary( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_binary->p_blob ); p_box->data.p_binary->i_blob = 0; } static int MP4_ReadBox_data( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_data_t ); MP4_Box_data_data_t *p_data = p_box->data.p_data; if ( i_read < 8 || i_read - 8 > UINT32_MAX ) MP4_READBOX_EXIT( 0 ); uint8_t i_type; MP4_GET1BYTE( i_type ); if ( i_type != 0 ) { #ifdef MP4_VERBOSE msg_Dbg( p_stream, "skipping unknown 'data' atom with type %"PRIu8, i_type ); #endif MP4_READBOX_EXIT( 0 ); } MP4_GET3BYTES( p_data->e_wellknowntype ); MP4_GET2BYTES( p_data->locale.i_country ); MP4_GET2BYTES( p_data->locale.i_language ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read 'data' atom: knowntype=%"PRIu32", country=%"PRIu16" lang=%"PRIu16 ", size %"PRId64" bytes", p_data->e_wellknowntype, 
p_data->locale.i_country, p_data->locale.i_language, i_read ); #endif p_box->data.p_data->p_blob = malloc( i_read ); if ( !p_box->data.p_data->p_blob ) MP4_READBOX_EXIT( 0 ); p_box->data.p_data->i_blob = i_read; memcpy( p_box->data.p_data->p_blob, p_peek, i_read); MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_data( MP4_Box_t *p_box ) { free( p_box->data.p_data->p_blob ); } static int MP4_ReadBox_Metadata( stream_t *p_stream, MP4_Box_t *p_box ) { const uint8_t *p_peek; if ( stream_Peek( p_stream, &p_peek, 16 ) < 16 ) return 0; if ( stream_Read( p_stream, NULL, 8 ) < 8 ) return 0; return MP4_ReadBoxContainerChildren( p_stream, p_box, ATOM_data ); } /* Chapter support */ static void MP4_FreeBox_chpl( MP4_Box_t *p_box ) { MP4_Box_data_chpl_t *p_chpl = p_box->data.p_chpl; for( unsigned i = 0; i < p_chpl->i_chapter; i++ ) free( p_chpl->chapter[i].psz_name ); } static int MP4_ReadBox_chpl( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_Box_data_chpl_t *p_chpl; uint32_t i_dummy; VLC_UNUSED(i_dummy); int i; MP4_READBOX_ENTER( MP4_Box_data_chpl_t ); p_chpl = p_box->data.p_chpl; MP4_GETVERSIONFLAGS( p_chpl ); if ( i_read < 5 || p_chpl->i_version != 0x1 ) MP4_READBOX_EXIT( 0 ); MP4_GET4BYTES( i_dummy ); MP4_GET1BYTE( p_chpl->i_chapter ); for( i = 0; i < p_chpl->i_chapter; i++ ) { uint64_t i_start; uint8_t i_len; int i_copy; if ( i_read < 9 ) break; MP4_GET8BYTES( i_start ); MP4_GET1BYTE( i_len ); p_chpl->chapter[i].psz_name = malloc( i_len + 1 ); if( !p_chpl->chapter[i].psz_name ) MP4_READBOX_EXIT( 0 ); i_copy = __MIN( i_len, i_read ); if( i_copy > 0 ) memcpy( p_chpl->chapter[i].psz_name, p_peek, i_copy ); p_chpl->chapter[i].psz_name[i_copy] = '\0'; p_chpl->chapter[i].i_start = i_start; p_peek += i_copy; i_read -= i_copy; } if ( i != p_chpl->i_chapter ) p_chpl->i_chapter = i; /* Bubble sort by increasing start date */ do { for( i = 0; i < p_chpl->i_chapter - 1; i++ ) { if( p_chpl->chapter[i].i_start > p_chpl->chapter[i+1].i_start ) { char *psz = p_chpl->chapter[i+1].psz_name; 
int64_t i64 = p_chpl->chapter[i+1].i_start; p_chpl->chapter[i+1].psz_name = p_chpl->chapter[i].psz_name; p_chpl->chapter[i+1].i_start = p_chpl->chapter[i].i_start; p_chpl->chapter[i].psz_name = psz; p_chpl->chapter[i].i_start = i64; i = -1; break; } } } while( i == -1 ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"chpl\" %d chapters", p_chpl->i_chapter ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tref_generic( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tref_generic_t ); p_box->data.p_tref_generic->i_track_ID = NULL; p_box->data.p_tref_generic->i_entry_count = i_read / sizeof(uint32_t); if( p_box->data.p_tref_generic->i_entry_count > 0 ) p_box->data.p_tref_generic->i_track_ID = calloc( p_box->data.p_tref_generic->i_entry_count, sizeof(uint32_t) ); if( p_box->data.p_tref_generic->i_track_ID == NULL ) MP4_READBOX_EXIT( 0 ); for( unsigned i = 0; i < p_box->data.p_tref_generic->i_entry_count; i++ ) { MP4_GET4BYTES( p_box->data.p_tref_generic->i_track_ID[i] ); } #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"chap\" %d references", p_box->data.p_tref_generic->i_entry_count ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_tref_generic( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_tref_generic->i_track_ID ); } static int MP4_ReadBox_keys( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_keys_t ); if ( i_read < 8 ) MP4_READBOX_EXIT( 0 ); uint32_t i_count; MP4_GET4BYTES( i_count ); /* reserved + flags */ if ( i_count != 0 ) MP4_READBOX_EXIT( 0 ); MP4_GET4BYTES( i_count ); p_box->data.p_keys->p_entries = calloc( i_count, sizeof(*p_box->data.p_keys->p_entries) ); if ( !p_box->data.p_keys->p_entries ) MP4_READBOX_EXIT( 0 ); p_box->data.p_keys->i_entry_count = i_count; uint32_t i=0; for( ; i < i_count; i++ ) { if ( i_read < 8 ) break; uint32_t i_keysize; MP4_GET4BYTES( i_keysize ); if ( (i_keysize < 8) || (i_keysize - 4 > i_read) ) break; MP4_GETFOURCC( 
p_box->data.p_keys->p_entries[i].i_namespace ); i_keysize -= 8; p_box->data.p_keys->p_entries[i].psz_value = malloc( i_keysize + 1 ); if ( !p_box->data.p_keys->p_entries[i].psz_value ) break; memcpy( p_box->data.p_keys->p_entries[i].psz_value, p_peek, i_keysize ); p_box->data.p_keys->p_entries[i].psz_value[i_keysize] = 0; p_peek += i_keysize; i_read -= i_keysize; #ifdef MP4_ULTRA_VERBOSE msg_Dbg( p_stream, "read box: \"keys\": %u '%s'", i + 1, p_box->data.p_keys->p_entries[i].psz_value ); #endif } if ( i < i_count ) p_box->data.p_keys->i_entry_count = i; MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_keys( MP4_Box_t *p_box ) { for( uint32_t i=0; i<p_box->data.p_keys->i_entry_count; i++ ) free( p_box->data.p_keys->p_entries[i].psz_value ); free( p_box->data.p_keys->p_entries ); } static int MP4_ReadBox_meta( stream_t *p_stream, MP4_Box_t *p_box ) { uint8_t meta_data[8]; int i_actually_read; // skip over box header i_actually_read = stream_Read( p_stream, meta_data, 8 ); if( i_actually_read < 8 ) return 0; if ( p_box->p_father && p_box->p_father->i_type == ATOM_udta ) /* itunes udta/meta */ { /* meta content starts with a 4 byte version/flags value (should be 0) */ i_actually_read = stream_Read( p_stream, meta_data, 4 ); if( i_actually_read < 4 || memcmp( meta_data, "\0\0\0", 4 ) ) return 0; } if ( !MP4_ReadBoxContainerChildren( p_stream, p_box, ATOM_hdlr ) ) return 0; /* Mandatory */ const MP4_Box_t *p_hdlr = MP4_BoxGet( p_box, "hdlr" ); if ( !p_hdlr || !BOXDATA(p_hdlr) || ( BOXDATA(p_hdlr)->i_handler_type != HANDLER_mdta && BOXDATA(p_hdlr)->i_handler_type != HANDLER_mdir ) || BOXDATA(p_hdlr)->i_version != 0 ) return 0; /* then it behaves like a container */ return MP4_ReadBoxContainerRaw( p_stream, p_box ); } static int MP4_ReadBox_iods( stream_t *p_stream, MP4_Box_t *p_box ) { char i_unused; VLC_UNUSED(i_unused); MP4_READBOX_ENTER( MP4_Box_data_iods_t ); MP4_GETVERSIONFLAGS( p_box->data.p_iods ); MP4_GET1BYTE( i_unused ); /* tag */ MP4_GET1BYTE( i_unused ); /* 
length */ MP4_GET2BYTES( p_box->data.p_iods->i_object_descriptor ); /* 10bits, 6 other bits are used for other flags */ MP4_GET1BYTE( p_box->data.p_iods->i_OD_profile_level ); MP4_GET1BYTE( p_box->data.p_iods->i_scene_profile_level ); MP4_GET1BYTE( p_box->data.p_iods->i_audio_profile_level ); MP4_GET1BYTE( p_box->data.p_iods->i_visual_profile_level ); MP4_GET1BYTE( p_box->data.p_iods->i_graphics_profile_level ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"iods\" objectDescriptorId: %i, OD: %i, scene: %i, audio: %i, " "visual: %i, graphics: %i", p_box->data.p_iods->i_object_descriptor >> 6, p_box->data.p_iods->i_OD_profile_level, p_box->data.p_iods->i_scene_profile_level, p_box->data.p_iods->i_audio_profile_level, p_box->data.p_iods->i_visual_profile_level, p_box->data.p_iods->i_graphics_profile_level ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_pasp( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_pasp_t ); MP4_GET4BYTES( p_box->data.p_pasp->i_horizontal_spacing ); MP4_GET4BYTES( p_box->data.p_pasp->i_vertical_spacing ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"paps\" %dx%d", p_box->data.p_pasp->i_horizontal_spacing, p_box->data.p_pasp->i_vertical_spacing); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mehd( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_mehd_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mehd ); if( p_box->data.p_mehd->i_version == 1 ) MP4_GET8BYTES( p_box->data.p_mehd->i_fragment_duration ); else /* version == 0 */ MP4_GET4BYTES( p_box->data.p_mehd->i_fragment_duration ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"mehd\" frag dur. 
%"PRIu64"", p_box->data.p_mehd->i_fragment_duration ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_trex( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_trex_t ); MP4_GETVERSIONFLAGS( p_box->data.p_trex ); MP4_GET4BYTES( p_box->data.p_trex->i_track_ID ); MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_description_index ); MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_duration ); MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_size ); MP4_GET4BYTES( p_box->data.p_trex->i_default_sample_flags ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"trex\" trackID: %"PRIu32"", p_box->data.p_trex->i_track_ID ); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_sdtp( stream_t *p_stream, MP4_Box_t *p_box ) { uint32_t i_sample_count; MP4_READBOX_ENTER( MP4_Box_data_sdtp_t ); MP4_Box_data_sdtp_t *p_sdtp = p_box->data.p_sdtp; MP4_GETVERSIONFLAGS( p_box->data.p_sdtp ); i_sample_count = i_read; p_sdtp->p_sample_table = calloc( i_sample_count, 1 ); if( !p_sdtp->p_sample_table ) MP4_READBOX_EXIT( 0 ); for( uint32_t i = 0; i < i_sample_count; i++ ) MP4_GET1BYTE( p_sdtp->p_sample_table[i] ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "i_sample_count is %"PRIu32"", i_sample_count ); if ( i_sample_count > 3 ) msg_Dbg( p_stream, "read box: \"sdtp\" head: %"PRIx8" %"PRIx8" %"PRIx8" %"PRIx8"", p_sdtp->p_sample_table[0], p_sdtp->p_sample_table[1], p_sdtp->p_sample_table[2], p_sdtp->p_sample_table[3] ); #endif MP4_READBOX_EXIT( 1 ); } static void MP4_FreeBox_sdtp( MP4_Box_t *p_box ) { FREENULL( p_box->data.p_sdtp->p_sample_table ); } static int MP4_ReadBox_tsel( stream_t *p_stream, MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_tsel_t ); uint32_t i_version; MP4_GET4BYTES( i_version ); if ( i_version != 0 || i_read < 4 ) MP4_READBOX_EXIT( 0 ); MP4_GET4BYTES( p_box->data.p_tsel->i_switch_group ); /* ignore list of attributes as es are present before switch */ MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_mfro( stream_t *p_stream, 
MP4_Box_t *p_box ) { MP4_READBOX_ENTER( MP4_Box_data_mfro_t ); MP4_GETVERSIONFLAGS( p_box->data.p_mfro ); MP4_GET4BYTES( p_box->data.p_mfro->i_size ); #ifdef MP4_VERBOSE msg_Dbg( p_stream, "read box: \"mfro\" size: %"PRIu32"", p_box->data.p_mfro->i_size); #endif MP4_READBOX_EXIT( 1 ); } static int MP4_ReadBox_tfra( stream_t *p_stream, MP4_Box_t *p_box ) { #define READ_VARIABLE_LENGTH(lengthvar, p_array) switch (lengthvar)\ {\ case 0:\ MP4_GET1BYTE( p_array[i] );\ break;\ case 1:\ MP4_GET2BYTES( *((uint16_t *)&p_array[i*2]) );\ break;\ case 2:\ MP4_GET3BYTES( *((uint32_t *)&p_array[i*4]) );\ break;\ case 3:\ MP4_GET4BYTES( *((uint32_t *)&p_array[i*4]) );\ break;\ default:\ goto error;\ } #define FIX_VARIABLE_LENGTH(lengthvar) if ( lengthvar == 3 ) lengthvar = 4 uint32_t i_number_of_entries; MP4_READBOX_ENTER( MP4_Box_data_tfra_t ); MP4_Box_data_tfra_t *p_tfra = p_box->data.p_tfra; MP4_GETVERSIONFLAGS( p_box->data.p_tfra ); if ( p_tfra->i_version > 1 ) MP4_READBOX_EXIT( 0 ); MP4_GET4BYTES( p_tfra->i_track_ID ); uint32_t i_lengths = 0; MP4_GET4BYTES( i_lengths ); MP4_GET4BYTES( p_tfra->i_number_of_entries ); i_number_of_entries = p_tfra->i_number_of_entries; p_tfra->i_length_size_of_traf_num = i_lengths >> 4; p_tfra->i_length_size_of_trun_num = ( i_lengths & 0x0c ) >> 2; p_tfra->i_length_size_of_sample_num = i_lengths & 0x03; size_t size = 4 + 4*p_tfra->i_version; /* size in {4, 8} */ p_tfra->p_time = calloc( i_number_of_entries, size ); p_tfra->p_moof_offset = calloc( i_number_of_entries, size ); size = 1 + p_tfra->i_length_size_of_traf_num; /* size in [|1, 4|] */ if ( size == 3 ) size++; p_tfra->p_traf_number = calloc( i_number_of_entries, size ); size = 1 + p_tfra->i_length_size_of_trun_num; if ( size == 3 ) size++; p_tfra->p_trun_number = calloc( i_number_of_entries, size ); size = 1 + p_tfra->i_length_size_of_sample_num; if ( size == 3 ) size++; p_tfra->p_sample_number = calloc( i_number_of_entries, size ); if( !p_tfra->p_time || !p_tfra->p_moof_offset || 
         /* (continuation of MP4_ReadBox_tfra, whose start is above)
          * bail out if any of the per-entry arrays failed to allocate */
         !p_tfra->p_traf_number || !p_tfra->p_trun_number ||
         !p_tfra->p_sample_number )
        goto error;

    /* Fixed per-entry overhead: the 3 variable-length number fields plus
     * their encoded sizes (i_length_size_of_* are sizes minus one). */
    int i_fields_length = 3 + p_tfra->i_length_size_of_traf_num
            + p_tfra->i_length_size_of_trun_num
            + p_tfra->i_length_size_of_sample_num;

    uint32_t i;
    for( i = 0; i < i_number_of_entries; i++ )
    {
        if( p_tfra->i_version == 1 )
        {
            /* version 1: 64-bit time and moof offset per entry */
            if ( i_read < i_fields_length + 16 )
                break;
            MP4_GET8BYTES( *((uint64_t *)&p_tfra->p_time[i*2]) );
            MP4_GET8BYTES( *((uint64_t *)&p_tfra->p_moof_offset[i*2]) );
        }
        else
        {
            /* version 0: 32-bit time and moof offset per entry */
            if ( i_read < i_fields_length + 8 )
                break;
            MP4_GET4BYTES( p_tfra->p_time[i] );
            MP4_GET4BYTES( p_tfra->p_moof_offset[i] );
        }

        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_traf_num, p_tfra->p_traf_number);
        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_trun_num, p_tfra->p_trun_number);
        READ_VARIABLE_LENGTH(p_tfra->i_length_size_of_sample_num, p_tfra->p_sample_number);
    }
    /* If payload ran out early, clamp the entry count to what was read. */
    if ( i < i_number_of_entries )
        i_number_of_entries = i;

    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_traf_num);
    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_trun_num);
    FIX_VARIABLE_LENGTH(p_tfra->i_length_size_of_sample_num);

#ifdef MP4_ULTRA_VERBOSE
    for( i = 0; i < i_number_of_entries; i++ )
    {
        if( p_tfra->i_version == 0 )
        {
            msg_Dbg( p_stream, "tfra[%"PRIu32"] time[%"PRIu32"]: %"PRIu32", "
                               "moof_offset[%"PRIu32"]: %"PRIu32"",
                     p_tfra->i_track_ID,
                     i, p_tfra->p_time[i],
                     i, p_tfra->p_moof_offset[i] );
        }
        else
        {
            msg_Dbg( p_stream, "tfra[%"PRIu32"] time[%"PRIu32"]: %"PRIu64", "
                               "moof_offset[%"PRIu32"]: %"PRIu64"",
                     p_tfra->i_track_ID,
                     i, ((uint64_t *)(p_tfra->p_time))[i],
                     i, ((uint64_t *)(p_tfra->p_moof_offset))[i] );
        }
    }
#endif
#ifdef MP4_VERBOSE
    msg_Dbg( p_stream, "tfra[%"PRIu32"] %"PRIu32" entries",
             p_tfra->i_track_ID, i_number_of_entries );
#endif

    MP4_READBOX_EXIT( 1 );
error:
    MP4_READBOX_EXIT( 0 );

#undef READ_VARIABLE_LENGTH
#undef FIX_VARIABLE_LENGTH
}

/* Release the per-entry arrays attached to a tfra box payload. */
static void MP4_FreeBox_tfra( MP4_Box_t *p_box )
{
    FREENULL( p_box->data.p_tfra->p_time );
    FREENULL( p_box->data.p_tfra->p_moof_offset );
    FREENULL( p_box->data.p_tfra->p_traf_number );
    FREENULL( p_box->data.p_tfra->p_trun_number );
    FREENULL( p_box->data.p_tfra->p_sample_number );
}

/* Read a QuickTime preview ('pnot') box: date, version, type and index.
 * The box has a fixed 20-byte size; anything else is rejected. */
static int MP4_ReadBox_pnot( stream_t *p_stream, MP4_Box_t *p_box )
{
    if ( p_box->i_size != 20 ) /* fixed-size box per the QT spec -- reject others */
        return 0;
    MP4_READBOX_ENTER( MP4_Box_data_pnot_t );
    MP4_GET4BYTES( p_box->data.p_pnot->i_date );
    uint16_t i_version;
    MP4_GET2BYTES( i_version );
    if ( i_version != 0 ) /* only version 0 is defined */
        MP4_READBOX_EXIT( 0 );
    MP4_GETFOURCC( p_box->data.p_pnot->i_type );
    MP4_GET2BYTES( p_box->data.p_pnot->i_index );
    MP4_READBOX_EXIT( 1 );
}

/* Fallback reader for box types with no dedicated handler.
 * Inside an stsd, dispatch on the track handler type (soun/vide/text/...)
 * so unknown sample entries still get a sensible parse; otherwise mark the
 * box as incompletely loaded and keep going (never a hard failure). */
static int MP4_ReadBox_default( stream_t *p_stream, MP4_Box_t *p_box )
{
    if( !p_box->p_father )
    {
        goto unknown;
    }
    if( p_box->p_father->i_type == ATOM_stsd )
    {
        /* stsd/<entry>: walk up to the mdia to find the track's hdlr */
        MP4_Box_t *p_mdia = MP4_BoxGet( p_box, "../../../.." );
        MP4_Box_t *p_hdlr;

        if( p_mdia == NULL || p_mdia->i_type != ATOM_mdia ||
            (p_hdlr = MP4_BoxGet( p_mdia, "hdlr" )) == NULL )
        {
            goto unknown;
        }
        switch( p_hdlr->data.p_hdlr->i_handler_type )
        {
            case ATOM_soun:
                return MP4_ReadBox_sample_soun( p_stream, p_box );
            case ATOM_vide:
                return MP4_ReadBox_sample_vide( p_stream, p_box );
            case ATOM_text:
                return MP4_ReadBox_sample_text( p_stream, p_box );
            case ATOM_tx3g:
            case ATOM_sbtl:
                return MP4_ReadBox_sample_tx3g( p_stream, p_box );
            default:
                msg_Warn( p_stream,
                          "unknown handler type in stsd (incompletely loaded)" );
                return 1;
        }
    }

unknown:
    if MP4_BOX_TYPE_ASCII()
        msg_Warn( p_stream,
                "unknown box type %4.4s (incompletely loaded)",
                (char*)&p_box->i_type );
    else
        msg_Warn( p_stream,
                "unknown box type c%3.3s (incompletely loaded)",
                (char*)&p_box->i_type+1 );
    p_box->e_flags |= BOX_FLAG_INCOMPLETE;

    return 1;
}

/**** ------------------------------------------------------------------- ****/
/****                   "Higher level" Functions                          ****/
/**** ------------------------------------------------------------------- ****/

/* Dispatch table mapping a fourcc (optionally restricted to a parent box
 * type) to its reader and destructor; scanned linearly by MP4_ReadBox(). */
static const struct
{
    uint32_t i_type;
    int  (*MP4_ReadBox_function )( stream_t *p_stream, MP4_Box_t *p_box );
    void (*MP4_FreeBox_function )( MP4_Box_t *p_box );
    uint32_t i_parent; /* set parent to restrict,
duplicating if needed; 0 for any */
} MP4_Box_Function [] =
{
    /* Containers */
    { ATOM_moov,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_foov,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_trak,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_trak,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_foov },
    { ATOM_mdia,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_moof,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_minf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_mdia },
    { ATOM_stbl,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_dinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_dinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_meta },
    { ATOM_edts,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_udta,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_nmhd,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_hnti,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_udta },
    { ATOM_rmra,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_rmda,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_rmra },
    { ATOM_tref,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_trak },
    { ATOM_gmhd,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_minf },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_stsd },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_mp4a }, /* some quicktime mp4a/wave/mp4a.. */
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_WMA2 }, /* flip4mac */
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_in24 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_in32 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_fl32 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_fl64 },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_QDMC },
    { ATOM_wave,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_QDM2 },
    { ATOM_ilst,    MP4_ReadBox_ilst,         MP4_FreeBox_Common, ATOM_meta },
    { ATOM_mvex,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moov },
    { ATOM_mvex,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_ftyp },

    /* specific box */
    { ATOM_ftyp,    MP4_ReadBox_ftyp,         MP4_FreeBox_ftyp, 0 },
    { ATOM_cmov,    MP4_ReadBox_cmov,         MP4_FreeBox_Common, 0 },
    { ATOM_mvhd,    MP4_ReadBox_mvhd,         MP4_FreeBox_Common, ATOM_moov },
    { ATOM_mvhd,    MP4_ReadBox_mvhd,         MP4_FreeBox_Common, ATOM_foov },
    { ATOM_tkhd,    MP4_ReadBox_tkhd,         MP4_FreeBox_Common, ATOM_trak },
    { ATOM_load,    MP4_ReadBox_load,         MP4_FreeBox_Common, ATOM_trak },
    { ATOM_mdhd,    MP4_ReadBox_mdhd,         MP4_FreeBox_Common, ATOM_mdia },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_mdia },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_meta },
    { ATOM_hdlr,    MP4_ReadBox_hdlr,         MP4_FreeBox_hdlr, ATOM_minf },
    { ATOM_vmhd,    MP4_ReadBox_vmhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_smhd,    MP4_ReadBox_smhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_hmhd,    MP4_ReadBox_hmhd,         MP4_FreeBox_Common, ATOM_minf },
    { ATOM_alis,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, ATOM_dref },
    { ATOM_url,     MP4_ReadBox_url,          MP4_FreeBox_url, 0 },
    { ATOM_urn,     MP4_ReadBox_urn,          MP4_FreeBox_urn, 0 },
    { ATOM_dref,    MP4_ReadBox_dref,         MP4_FreeBox_Common, 0 },
    /* sample tables */
    { ATOM_stts,    MP4_ReadBox_stts,         MP4_FreeBox_stts, ATOM_stbl },
    { ATOM_ctts,    MP4_ReadBox_ctts,         MP4_FreeBox_ctts, ATOM_stbl },
    { ATOM_stsd,    MP4_ReadBox_stsd,         MP4_FreeBox_Common, ATOM_stbl },
    { ATOM_stsz,    MP4_ReadBox_stsz,         MP4_FreeBox_stsz, ATOM_stbl },
    { ATOM_stsc,    MP4_ReadBox_stsc,         MP4_FreeBox_stsc, ATOM_stbl },
    { ATOM_stco,    MP4_ReadBox_stco_co64,    MP4_FreeBox_stco_co64, ATOM_stbl },
    { ATOM_co64,    MP4_ReadBox_stco_co64,    MP4_FreeBox_stco_co64, ATOM_stbl },
    { ATOM_stss,    MP4_ReadBox_stss,         MP4_FreeBox_stss, ATOM_stbl },
    { ATOM_stsh,    MP4_ReadBox_stsh,         MP4_FreeBox_stsh, ATOM_stbl },
    { ATOM_stdp,    MP4_ReadBox_stdp,         MP4_FreeBox_stdp, 0 },
    { ATOM_padb,    MP4_ReadBox_padb,         MP4_FreeBox_padb, 0 },
    { ATOM_elst,    MP4_ReadBox_elst,         MP4_FreeBox_elst, ATOM_edts },
    { ATOM_cprt,    MP4_ReadBox_cprt,         MP4_FreeBox_cprt, 0 },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_wave }, /* mp4a in wave chunk */
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4a },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4v },
    { ATOM_esds,    MP4_ReadBox_esds,         MP4_FreeBox_esds, ATOM_mp4s },
    { ATOM_dcom,    MP4_ReadBox_dcom,         MP4_FreeBox_Common, 0 },
    { ATOM_cmvd,    MP4_ReadBox_cmvd,         MP4_FreeBox_cmvd, 0 },
    { ATOM_avcC,    MP4_ReadBox_avcC,         MP4_FreeBox_avcC, ATOM_avc1 },
    { ATOM_hvcC,    MP4_ReadBox_hvcC,         MP4_FreeBox_hvcC, 0 },
    { ATOM_dac3,    MP4_ReadBox_dac3,         MP4_FreeBox_Common, 0 },
    { ATOM_dec3,    MP4_ReadBox_dec3,         MP4_FreeBox_Common, 0 },
    { ATOM_dvc1,    MP4_ReadBox_dvc1,         MP4_FreeBox_Common, 0 },
    { ATOM_enda,    MP4_ReadBox_enda,         MP4_FreeBox_Common, 0 },
    { ATOM_iods,    MP4_ReadBox_iods,         MP4_FreeBox_Common, 0 },
    { ATOM_pasp,    MP4_ReadBox_pasp,         MP4_FreeBox_Common, 0 },
    { ATOM_keys,    MP4_ReadBox_keys,         MP4_FreeBox_keys, ATOM_meta },

    /* Quicktime preview atoms, all at root */
    { ATOM_pnot,    MP4_ReadBox_pnot,         MP4_FreeBox_Common, 0 },
    { ATOM_pict,    MP4_ReadBox_Binary,       MP4_FreeBox_Binary, 0 },
    { ATOM_PICT,    MP4_ReadBox_Binary,       MP4_FreeBox_Binary, 0 },

    /* Nothing to do with this box */
    { ATOM_mdat,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_skip,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_free,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_wide,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },
    { ATOM_binm,    MP4_ReadBoxSkip,          MP4_FreeBox_Common, 0 },

    /* Subtitles */
    { ATOM_tx3g,    MP4_ReadBox_sample_tx3g,  MP4_FreeBox_Common, 0 },
    //{ ATOM_text,    MP4_ReadBox_sample_text,  MP4_FreeBox_Common, 0 },

    /* for codecs */
    { ATOM_soun,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ac3,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_eac3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_lpcm,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms02,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms11,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ms55,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM__mp3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_mp4a,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_twos,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_sowt,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_QDMC,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_QDM2,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ima4,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_IMA4,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_dvi,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_alaw,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_ulaw,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_raw,     MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_MAC3,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_MAC6,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_Qclp,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_samr,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_sawb,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_OggS,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_alac,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd },
    { ATOM_WMA2,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, ATOM_stsd }, /* flip4mac */
    /* Sound extensions */
    { ATOM_chan,    MP4_ReadBox_stsdext_chan, MP4_FreeBox_stsdext_chan, 0 },
    { ATOM_WMA2,    MP4_ReadBox_WMA2,         MP4_FreeBox_WMA2, ATOM_wave }, /* flip4mac */

    { ATOM_drmi,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_vide,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mp4v,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_SVQ1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_SVQ3,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_ZyGo,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_DIVX,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_XVID,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_h263,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_s263,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_cvid,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IV1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3iv1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IV2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3iv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3IVD,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3ivd,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3VID,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_3vid,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mjpa,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mjpb,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_qdrw,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_mp2v,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_hdv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_WMV3,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },

    { ATOM_mjqt,    MP4_ReadBox_default,      NULL, 0 }, /* found in mjpa/b */
    { ATOM_mjht,    MP4_ReadBox_default,      NULL, 0 },

    { ATOM_dvc,     MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dvp,     MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dv5n,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_dv5p,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_VP31,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_vp31,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_h264,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_jpeg,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },
    { ATOM_avc1,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, ATOM_stsd },

    { ATOM_yv12,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, 0 },
    { ATOM_yuv2,    MP4_ReadBox_sample_vide,  MP4_FreeBox_sample_vide, 0 },

    { ATOM_strf,    MP4_ReadBox_strf,         MP4_FreeBox_strf, ATOM_WMV3 }, /* flip4mac */
    { ATOM_ASF ,    MP4_ReadBox_ASF,          MP4_FreeBox_Common, ATOM_WMV3 }, /* flip4mac */
    { ATOM_ASF ,    MP4_ReadBox_ASF,          MP4_FreeBox_Common, ATOM_wave }, /* flip4mac */

    { ATOM_mp4s,    MP4_ReadBox_sample_mp4s,  MP4_FreeBox_Common, ATOM_stsd },

    /* XXX there is 2 box where we could find this entry stbl and tref*/
    { ATOM_hint,    MP4_ReadBox_default,      MP4_FreeBox_Common, 0 },

    /* found in tref box */
    { ATOM_dpnd,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_ipir,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_mpod,    MP4_ReadBox_default,      NULL, 0 },
    { ATOM_chap,    MP4_ReadBox_tref_generic, MP4_FreeBox_tref_generic, 0 },

    /* found in hnti */
    { ATOM_rtp,     MP4_ReadBox_default,      NULL, 0 },

    /* found in rmra/rmda */
    { ATOM_rdrf,    MP4_ReadBox_rdrf,         MP4_FreeBox_rdrf , ATOM_rmda },
    { ATOM_rmdr,    MP4_ReadBox_rmdr,         MP4_FreeBox_Common, ATOM_rmda },
    { ATOM_rmqu,    MP4_ReadBox_rmqu,         MP4_FreeBox_Common, ATOM_rmda },
    { ATOM_rmvc,    MP4_ReadBox_rmvc,         MP4_FreeBox_Common, ATOM_rmda },

    /* DRM (iTunes) */
    { ATOM_drms,    MP4_ReadBox_sample_soun,  MP4_FreeBox_sample_soun, 0 },
    { ATOM_sinf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_schi,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_user,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_key,     MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_iviv,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_priv,    MP4_ReadBox_drms,         MP4_FreeBox_Common, 0 },
    { ATOM_frma,    MP4_ReadBox_frma,         MP4_FreeBox_Common, ATOM_sinf }, /* and rinf */
    { ATOM_frma,    MP4_ReadBox_frma,         MP4_FreeBox_Common, ATOM_wave }, /* flip4mac */
    { ATOM_skcr,    MP4_ReadBox_skcr,         MP4_FreeBox_Common, 0 },

    /* ilst meta tags */
    { ATOM_0xa9ART, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9alb, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9cmt, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9com, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9day, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9des, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9enc, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9gen, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9grp, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9lyr, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9nam, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9too, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9trk, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_0xa9wrt, MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_aART,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_atID,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst }, /* iTunes */
    { ATOM_cnID,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst }, /* iTunes */
    { ATOM_covr,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_disk,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_flvr,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_gnre,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_rtng,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_trkn,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },
    { ATOM_xid_,    MP4_ReadBox_Metadata,     MP4_FreeBox_Common, ATOM_ilst },

    /* udta */
    { ATOM_0x40PRM, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0x40PRQ, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ART, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9alb, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ard, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9arg, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9aut, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cak, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cmt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9con, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9com, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9cpy, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9day, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9des, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dir, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dis, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9dsa, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9fmt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9gen, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9grp, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9hst, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9inf, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9isr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lab, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lal, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lnt, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9lyr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mak, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mal, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9mod, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9nam, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9ope, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9phg, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9PRD, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9prd, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9prf, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9pub, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9req, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9sne, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9snm, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9sol, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9src, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9st3, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9swr, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9thx, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9too, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9trk, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9url, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9wrn, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9xpd, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_0xa9xyz, MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_chpl,    MP4_ReadBox_chpl,         MP4_FreeBox_chpl, ATOM_udta }, /* nero unlabeled chapters list */
    { ATOM_MCPS,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_name,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_vndr,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },
    { ATOM_SDLN,    MP4_ReadBox_String,       MP4_FreeBox_String, ATOM_udta },

    /* udta, non meta */
    { ATOM_tsel,    MP4_ReadBox_tsel,         MP4_FreeBox_Common, ATOM_udta },

    /* iTunes/Quicktime meta info */
    { ATOM_meta,    MP4_ReadBox_meta,         MP4_FreeBox_Common, 0 },
    { ATOM_data,    MP4_ReadBox_data,         MP4_FreeBox_data, 0 },

    /* found in smoothstreaming */
    { ATOM_traf,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, ATOM_moof },
    { ATOM_mfra,    MP4_ReadBoxContainer,     MP4_FreeBox_Common, 0 },
    { ATOM_mfhd,    MP4_ReadBox_mfhd,         MP4_FreeBox_Common, ATOM_moof },
    { ATOM_sidx,    MP4_ReadBox_sidx,         MP4_FreeBox_sidx, 0 },
    { ATOM_tfhd,    MP4_ReadBox_tfhd,         MP4_FreeBox_Common, ATOM_traf },
    { ATOM_trun,    MP4_ReadBox_trun,         MP4_FreeBox_trun, ATOM_traf },
    { ATOM_trex,    MP4_ReadBox_trex,         MP4_FreeBox_Common, ATOM_mvex },
    { ATOM_mehd,    MP4_ReadBox_mehd,         MP4_FreeBox_Common, ATOM_mvex },
    { ATOM_sdtp,    MP4_ReadBox_sdtp,         MP4_FreeBox_sdtp, 0 },
    { ATOM_tfra,    MP4_ReadBox_tfra,         MP4_FreeBox_tfra, ATOM_mfra },
    { ATOM_mfro,    MP4_ReadBox_mfro,         MP4_FreeBox_Common, ATOM_mfra },
    { ATOM_uuid,    MP4_ReadBox_uuid,         MP4_FreeBox_uuid, 0 },

    /* Last entry */
    { 0,            MP4_ReadBox_default,      NULL, 0 }
};

/*****************************************************************************
 * MP4_ReadBox : parse the actual box and the children
 *  XXX : Do not go to the next box
 *****************************************************************************/
static MP4_Box_t *MP4_ReadBox( stream_t *p_stream, MP4_Box_t *p_father )
{
    MP4_Box_t *p_box = calloc( 1, sizeof( MP4_Box_t ) ); /* Needed to ensure simple on error handler */
    unsigned int i_index;

    if( p_box == NULL )
        return NULL;

    if( !MP4_ReadBoxCommon( p_stream, p_box ) )
    {
        msg_Warn( p_stream, "cannot read one box" );
        free( p_box );
        return NULL;
    }
    if( !p_box->i_size )
    {
        msg_Dbg( p_stream, "found an empty box (null size)" );
        free( p_box );
        return NULL;
    }
    p_box->p_father = p_father;

    /* Now search function to call: first matching (type, parent) entry wins;
     * the terminating { 0, MP4_ReadBox_default } entry guarantees a match. */
    for( i_index = 0; ; i_index++ )
    {
        /* entries with a parent restriction only apply under that parent */
        if ( MP4_Box_Function[i_index].i_parent &&
             p_box->p_father &&
             p_box->p_father->i_type != MP4_Box_Function[i_index].i_parent )
            continue;

        if( ( MP4_Box_Function[i_index].i_type == p_box->i_type )||
            ( MP4_Box_Function[i_index].i_type == 0 ) )
        {
            break;
        }
    }

    if( !(MP4_Box_Function[i_index].MP4_ReadBox_function)( p_stream, p_box ) )
    {
        off_t i_end = p_box->i_pos + p_box->i_size;
        MP4_BoxFree( p_stream, p_box );
        stream_Seek( p_stream, i_end ); /* Skip the failed box */
        return NULL;
    }
    /* remember the matching destructor for MP4_BoxFree() */
    p_box->pf_free = MP4_Box_Function[i_index].MP4_FreeBox_function;

    return p_box;
}

/*****************************************************************************
 * MP4_FreeBox : free memory after read with MP4_ReadBox and all
 * the children
 *****************************************************************************/
void MP4_BoxFree( stream_t *s, MP4_Box_t *p_box )
{
    MP4_Box_t *p_child;

    if( !p_box )
        return; /* hehe */

    /* free the whole subtree first (recursion depth is bounded by the
     * nesting depth of the file's box hierarchy) */
    for( p_child = p_box->p_first; p_child != NULL; )
    {
        MP4_Box_t *p_next;

        p_next = p_child->p_next;
        MP4_BoxFree( s, p_child );
        p_child = p_next;
    }

    /* Now search function to call */
    if( p_box->data.p_payload )
    {
        if (unlikely( p_box->pf_free == NULL ))
        {
            /* Should not happen */
            if MP4_BOX_TYPE_ASCII()
                msg_Warn( s, "cannot free box %4.4s, type unknown",
                        (char*)&p_box->i_type );
            else
                msg_Warn( s, "cannot free box c%3.3s, type unknown",
                        (char*)&p_box->i_type+1 );
        }
        else
        {
            p_box->pf_free( p_box );
        }
        free( p_box->data.p_payload );
    }
    free( p_box );
}

/* SmooBox is a very simple MP4 box, VLC specific, used only for the stream_filter to
 * send information to the demux.
SmooBox is actually a simplified moov box (we wanted * to avoid the hassle of building a moov box at the stream_filter level) */ MP4_Box_t *MP4_BoxGetSmooBox( stream_t *s ) { /* p_chunk is a virtual root container for the smoo box */ MP4_Box_t *p_chunk; MP4_Box_t *p_smoo; p_chunk = calloc( 1, sizeof( MP4_Box_t ) ); if( unlikely( p_chunk == NULL ) ) return NULL; p_chunk->i_type = ATOM_root; p_chunk->i_shortsize = 1; p_smoo = MP4_ReadBox( s, p_chunk ); if( !p_smoo || p_smoo->i_type != ATOM_uuid || CmpUUID( &p_smoo->i_uuid, &SmooBoxUUID ) ) { msg_Warn( s, "no smoo box found!"); goto error; } p_chunk->p_first = p_smoo; p_chunk->p_last = p_smoo; return p_chunk; error: free( p_chunk ); return NULL; } MP4_Box_t *MP4_BoxGetNextChunk( stream_t *s ) { /* p_chunk is a virtual root container for the moof and mdat boxes */ MP4_Box_t *p_chunk; MP4_Box_t *p_tmp_box = NULL; p_tmp_box = calloc( 1, sizeof( MP4_Box_t ) ); if( unlikely( p_tmp_box == NULL ) ) return NULL; /* We might get a ftyp box or a SmooBox */ MP4_ReadBoxCommon( s, p_tmp_box ); if( (p_tmp_box->i_type == ATOM_uuid && !CmpUUID( &p_tmp_box->i_uuid, &SmooBoxUUID )) ) { free( p_tmp_box ); return MP4_BoxGetSmooBox( s ); } else if( p_tmp_box->i_type == ATOM_ftyp ) { free( p_tmp_box ); return MP4_BoxGetRoot( s ); } free( p_tmp_box ); p_chunk = calloc( 1, sizeof( MP4_Box_t ) ); if( unlikely( p_chunk == NULL ) ) return NULL; p_chunk->i_type = ATOM_root; p_chunk->i_shortsize = 1; MP4_ReadBoxContainerChildren( s, p_chunk, ATOM_moof ); p_tmp_box = p_chunk->p_first; while( p_tmp_box ) { p_chunk->i_size += p_tmp_box->i_size; p_tmp_box = p_tmp_box->p_next; } return p_chunk; } /***************************************************************************** * MP4_BoxGetRoot : Parse the entire file, and create all boxes in memory ***************************************************************************** * The first box is a virtual box "root" and is the father for all first * level boxes for the file, a sort of virtual contener 
*****************************************************************************/ MP4_Box_t *MP4_BoxGetRoot( stream_t *s ) { MP4_Box_t *p_root; stream_t *p_stream; int i_result; p_root = malloc( sizeof( MP4_Box_t ) ); if( p_root == NULL ) return NULL; p_root->i_pos = 0; p_root->i_type = ATOM_root; p_root->i_shortsize = 1; /* could be a DASH stream for exemple, 0 means unknown or infinite size */ p_root->i_size = 0; CreateUUID( &p_root->i_uuid, p_root->i_type ); p_root->data.p_payload = NULL; p_root->p_father = NULL; p_root->p_first = NULL; p_root->p_last = NULL; p_root->p_next = NULL; p_stream = s; /* First get the moov */ i_result = MP4_ReadBoxContainerChildren( p_stream, p_root, ATOM_moov ); if( !i_result ) goto error; /* If there is a mvex box, it means fragmented MP4, and we're done */ else if( MP4_BoxCount( p_root, "moov/mvex" ) > 0 ) return p_root; p_root->i_size = stream_Size( s ); if( stream_Tell( s ) + 8 < stream_Size( s ) ) { /* Get the rest of the file */ i_result = MP4_ReadBoxContainerRaw( p_stream, p_root ); if( !i_result ) goto error; } MP4_Box_t *p_moov; MP4_Box_t *p_cmov; /* check if there is a cmov, if so replace compressed moov by uncompressed one */ if( ( ( p_moov = MP4_BoxGet( p_root, "moov" ) ) && ( p_cmov = MP4_BoxGet( p_root, "moov/cmov" ) ) ) || ( ( p_moov = MP4_BoxGet( p_root, "foov" ) ) && ( p_cmov = MP4_BoxGet( p_root, "foov/cmov" ) ) ) ) { /* rename the compressed moov as a box to skip */ p_moov->i_type = ATOM_skip; /* get uncompressed p_moov */ p_moov = p_cmov->data.p_cmov->p_moov; p_cmov->data.p_cmov->p_moov = NULL; /* make p_root father of this new moov */ p_moov->p_father = p_root; /* insert this new moov box as first child of p_root */ p_moov->p_next = p_root->p_first; p_root->p_first = p_moov; } return p_root; error: free( p_root ); stream_Seek( p_stream, 0 ); return NULL; } static void MP4_BoxDumpStructure_Internal( stream_t *s, MP4_Box_t *p_box, unsigned int i_level ) { MP4_Box_t *p_child; uint32_t i_displayedtype = p_box->i_type; 
    /* non-printable fourccs are shown as 'c' + last 3 chars */
    if( ! MP4_BOX_TYPE_ASCII() )
        ((char*)&i_displayedtype)[0] = 'c';

    if( !i_level )
    {
        msg_Dbg( s, "dumping root Box \"%4.4s\"",
                          (char*)&i_displayedtype );
    }
    else
    {
        char str[512];
        /* each level consumes 4 chars of indent; stop before overflowing */
        if( i_level >= (sizeof(str) - 1)/4 )
            return;
        memset( str, ' ', sizeof(str) );
        for( unsigned i = 0; i < i_level; i++ )
        {
            str[i*4] = '|';
        }

        snprintf( &str[i_level * 4], sizeof(str) - 4*i_level,
                  "+ %4.4s size %"PRIu64" offset %" PRIuMAX "%s",
                    (char*)&i_displayedtype, p_box->i_size,
                  (uintmax_t)p_box->i_pos,
                p_box->e_flags & BOX_FLAG_INCOMPLETE ? " (\?\?\?\?)" : "" );
        msg_Dbg( s, "%s", str );
    }
    p_child = p_box->p_first;
    while( p_child )
    {
        MP4_BoxDumpStructure_Internal( s, p_child, i_level + 1 );
        p_child = p_child->p_next;
    }
}

/* Debug dump of a whole box tree, starting at level 0. */
void MP4_BoxDumpStructure( stream_t *s, MP4_Box_t *p_box )
{
    MP4_BoxDumpStructure_Internal( s, p_box, 0 );
}

/*****************************************************************************
 *****************************************************************************
 **
 **  High level methods to acces an MP4 file
 **
 *****************************************************************************
 *****************************************************************************/

/* Split the next component off a box path.
 * On return *ppsz_token is the component (malloc'd; NULL at end of path),
 * *pi_number is the optional "[n]" index (0 when absent), and *ppsz_path is
 * advanced past the component, its index and any trailing '/' separators.
 * Aborts on OOM (strndup failure). */
static void get_token( char **ppsz_path, char **ppsz_token, int *pi_number )
{
    size_t i_len ;
    if( !*ppsz_path[0] )
    {
        *ppsz_token = NULL;
        *pi_number = 0;
        return;
    }
    i_len = strcspn( *ppsz_path, "/[" );
    /* a leading '/' is itself a one-char token (means "go to root") */
    if( !i_len && **ppsz_path == '/' )
    {
        i_len = 1;
    }
    *ppsz_token = strndup( *ppsz_path, i_len );
    if( unlikely(!*ppsz_token) )
        abort();

    *ppsz_path += i_len;

    if( **ppsz_path == '[' )
    {
        (*ppsz_path)++;
        *pi_number = strtol( *ppsz_path, NULL, 10 );
        /* skip the digits up to (and including) the closing ']' */
        while( **ppsz_path && **ppsz_path != ']' )
        {
            (*ppsz_path)++;
        }
        if( **ppsz_path == ']' )
        {
            (*ppsz_path)++;
        }
    }
    else
    {
        *pi_number = 0;
    }
    /* swallow separator(s) before the next component */
    while( **ppsz_path == '/' )
    {
        (*ppsz_path)++;
    }
}

/* Resolve a printf-style box path (see MP4_BoxGet below for the syntax)
 * relative to p_box; stores the found box, or NULL, in *pp_result. */
static void MP4_BoxGet_Internal( MP4_Box_t **pp_result,
                        MP4_Box_t *p_box, const char *psz_fmt, va_list args)
{
    char *psz_dup;
    char *psz_path;
    char *psz_token;

    if( !p_box )
    {
        *pp_result = NULL;
        return;
    }

    if( vasprintf( &psz_path, psz_fmt, args ) == -1 )
        psz_path = NULL;

    if( !psz_path || !psz_path[0] )
    {
        free( psz_path );
        *pp_result = NULL;
        return;
    }

//    fprintf( stderr, "path:'%s'\n", psz_path );
    psz_dup = psz_path; /* keep this pointer, as it need to be unallocated */
    for( ; ; )
    {
        int i_number;

        get_token( &psz_path, &psz_token, &i_number );
//        fprintf( stderr, "path:'%s', token:'%s' n:%d\n",
//                 psz_path,psz_token,i_number );
        if( !psz_token )
        {
            /* path fully consumed: p_box is the answer */
            free( psz_dup );
            *pp_result = p_box;
            return;
        }
        else
        if( !strcmp( psz_token, "/" ) )
        {
            /* Find root box */
            while( p_box && p_box->i_type != ATOM_root )
            {
                p_box = p_box->p_father;
            }
            if( !p_box )
            {
                goto error_box;
            }
        }
        else
        if( !strcmp( psz_token, "." ) )
        {
            /* Do nothing */
        }
        else
        if( !strcmp( psz_token, ".." ) )
        {
            p_box = p_box->p_father;
            if( !p_box )
            {
                goto error_box;
            }
        }
        else
        if( strlen( psz_token ) == 4 )
        {
            /* fourcc component: find the i_number-th child of that type */
            uint32_t i_fourcc;
            i_fourcc = VLC_FOURCC( psz_token[0], psz_token[1],
                                   psz_token[2], psz_token[3] );
            p_box = p_box->p_first;
            for( ; ; )
            {
                if( !p_box )
                {
                    goto error_box;
                }
                if( p_box->i_type == i_fourcc )
                {
                    if( !i_number )
                    {
                        break;
                    }
                    i_number--;
                }
                p_box = p_box->p_next;
            }
        }
        else
        if( *psz_token == '\0' )
        {
            /* bare "[n]" component: take the n-th child, whatever its type */
            p_box = p_box->p_first;
            for( ; ; )
            {
                if( !p_box )
                {
                    goto error_box;
                }
                if( !i_number )
                {
                    break;
                }
                i_number--;
                p_box = p_box->p_next;
            }
        }
        else
        {
//            fprintf( stderr, "Argg malformed token \"%s\"",psz_token );
            goto error_box;
        }

        FREENULL( psz_token );
    }

    return;

error_box:
    free( psz_token );
    free( psz_dup );
    *pp_result = NULL;
    return;
}

/*****************************************************************************
 * MP4_BoxGet: find a box given a path relative to p_box
 *****************************************************************************
 * Path Format: . .. / as usual
 * [number] to specifie box number ex: trak[12]
 *
 * ex: /moov/trak[12]
 *     ../mdia
 *****************************************************************************/
MP4_Box_t *MP4_BoxGet( MP4_Box_t *p_box, const char *psz_fmt, ... )
{
    va_list args;
    MP4_Box_t *p_result;

    va_start( args, psz_fmt );
    MP4_BoxGet_Internal( &p_result, p_box, psz_fmt, args );
    va_end( args );

    return( p_result );
}

/*****************************************************************************
 * MP4_BoxCount: count box given a path relative to p_box
 *****************************************************************************
 * Path Format: . .. / as usual
 * [number] to specifie box number ex: trak[12]
 *
 * ex: /moov/trak[12]
 *     ../mdia
 *****************************************************************************/
int MP4_BoxCount( MP4_Box_t *p_box, const char *psz_fmt, ... )
{
    va_list args;
    int i_count;
    MP4_Box_t *p_result, *p_next;

    va_start( args, psz_fmt );
    MP4_BoxGet_Internal( &p_result, p_box, psz_fmt, args );
    va_end( args );

    if( !p_result )
    {
        return( 0 );
    }

    /* count the found box plus all following siblings of the same type */
    i_count = 1;
    for( p_next = p_result->p_next; p_next != NULL; p_next = p_next->p_next)
    {
        if( p_next->i_type == p_result->i_type)
        {
            i_count++;
        }
    }

    return( i_count );
}
./CrossVul/dataset_final_sorted/CWE-191/c/good_2395_0
crossvul-cpp_data_bad_4828_0
/* * The two pass scaling function is based on: * Filtered Image Rescaling * Based on Gems III * - Schumacher general filtered image rescaling * (pp. 414-424) * by Dale Schumacher * * Additional changes by Ray Gardener, Daylon Graphics Ltd. * December 4, 1999 * * Ported to libgd by Pierre Joye. Support for multiple channels * added (argb for now). * * Initial sources code is avaibable in the Gems Source Code Packages: * http://www.acm.org/pubs/tog/GraphicsGems/GGemsIII.tar.gz * */ /* Summary: - Horizontal filter contributions are calculated on the fly, as each column is mapped from src to dst image. This lets us omit having to allocate a temporary full horizontal stretch of the src image. - If none of the src pixels within a sampling region differ, then the output pixel is forced to equal (any of) the source pixel. This ensures that filters do not corrupt areas of constant color. - Filter weight contribution results, after summing, are rounded to the nearest pixel color value instead of being casted to ILubyte (usually an int or char). Otherwise, artifacting occurs. */ /* Additional functions are available for simple rotation or up/downscaling. downscaling using the fixed point implementations are usually much faster than the existing gdImageCopyResampled while having a similar or better quality. For image rotations, the optimized versions have a lazy antialiasing for the edges of the images. For a much better antialiased result, the affine function is recommended. */ /* TODO: - Optimize pixel accesses and loops once we have continuous buffer - Add scale support for a portion only of an image (equivalent of copyresized/resampled) */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #undef NDEBUG /* Comment out this line to enable asserts. * TODO: This logic really belongs in cmake and configure. 
*/ #define NDEBUG 1 #include <assert.h> #include "gd.h" #include "gdhelpers.h" #include "gd_intern.h" #ifdef _MSC_VER # pragma optimize("t", on) # include <emmintrin.h> #endif static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height); static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height); static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height); static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor); static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor); #define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x))) /* only used here, let do a generic fixed point integers later if required by other part of GD */ typedef long gdFixed; /* Integer to fixed point */ #define gd_itofx(x) ((x) << 8) /* Float to fixed point */ #define gd_ftofx(x) (long)((x) * 256) /* Double to fixed point */ #define gd_dtofx(x) (long)((x) * 256) /* Fixed point to integer */ #define gd_fxtoi(x) ((x) >> 8) /* Fixed point to float */ # define gd_fxtof(x) ((float)(x) / 256) /* Fixed point to double */ #define gd_fxtod(x) ((double)(x) / 256) /* Multiply a fixed by a fixed */ #define gd_mulfx(x,y) (((x) * (y)) >> 8) /* Divide a fixed by a fixed */ #define gd_divfx(x,y) (((x) << 8) / (y)) typedef struct { double *Weights; /* Normalized weights of neighboring pixels */ int Left,Right; /* Bounds of source pixels window */ } ContributionType; /* Contirbution information for a single pixel */ typedef struct { ContributionType *ContribRow; /* Row (or column) of contribution weights */ unsigned int WindowSize, /* Filter window size (of affecting source pixels) */ LineLength; /* Length of line (no. 
or rows / cols) */ } LineContribType; /* Each core filter has its own radius */ #define DEFAULT_FILTER_LINEAR 1.0f #define DEFAULT_FILTER_BICUBIC 3.0f #define DEFAULT_FILTER_BOX 0.5f #define DEFAULT_FILTER_GENERALIZED_CUBIC 0.5f #define DEFAULT_FILTER_RADIUS 1.0f #define DEFAULT_LANCZOS8_RADIUS 8.0f #define DEFAULT_LANCZOS3_RADIUS 3.0f #define DEFAULT_HERMITE_RADIUS 1.0f #define DEFAULT_BOX_RADIUS 0.5f #define DEFAULT_TRIANGLE_RADIUS 1.0f #define DEFAULT_BELL_RADIUS 1.5f #define DEFAULT_CUBICSPLINE_RADIUS 2.0f #define DEFAULT_MITCHELL_RADIUS 2.0f #define DEFAULT_COSINE_RADIUS 1.0f #define DEFAULT_CATMULLROM_RADIUS 2.0f #define DEFAULT_QUADRATIC_RADIUS 1.5f #define DEFAULT_QUADRATICBSPLINE_RADIUS 1.5f #define DEFAULT_CUBICCONVOLUTION_RADIUS 3.0f #define DEFAULT_GAUSSIAN_RADIUS 1.0f #define DEFAULT_HANNING_RADIUS 1.0f #define DEFAULT_HAMMING_RADIUS 1.0f #define DEFAULT_SINC_RADIUS 1.0f #define DEFAULT_WELSH_RADIUS 1.0f static double KernelBessel_J1(const double x) { double p, q; register long i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p = Pone[8]; q = Qone[8]; for (i=7; i >= 0; i--) { p = p*x*x+Pone[i]; q = q*x*x+Qone[i]; } return (double)(p/q); } static double KernelBessel_P1(const double x) { double p, q; register long i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 
0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Q1(const double x) { double p, q; register long i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Order1(double x) { double p, q; if (x == 0.0) return (0.0f); p = x; if (x < 0.0) x=(-x); if (x < 8.0) return (p*KernelBessel_J1(x)); q = (double)sqrt(2.0f/(M_PI*x))*(double)(KernelBessel_P1(x)*(1.0f/sqrt(2.0f)*(sin(x)-cos(x)))-8.0f/x*KernelBessel_Q1(x)* (-1.0f/sqrt(2.0f)*(sin(x)+cos(x)))); if (p < 0.0f) q = (-q); return (q); } static double filter_bessel(const double x) { if (x == 0.0f) return (double)(M_PI/4.0f); return (KernelBessel_Order1((double)M_PI*x)/(2.0f*x)); } static double filter_blackman(const double x) { return (0.42f+0.5f*(double)cos(M_PI*x)+0.08f*(double)cos(2.0f*M_PI*x)); } double filter_linear(const double x) { double ax = fabs(x); if (ax < 1.0f) { return (1.0f - ax); } return 0.0f; } /** * Bicubic interpolation kernel (a=-1): \verbatim / | 1-2|t|**2+|t|**3 , if |t| < 1 h(t) = | 4-8|t|+5|t|**2-|t|**3 , if 1<=|t|<2 | 0 , otherwise \ \endverbatim * ***bd*** 2.2004 */ static double 
filter_bicubic(const double t) { const double abs_t = (double)fabs(t); const double abs_t_sq = abs_t * abs_t; if (abs_t<1) return 1-2*abs_t_sq+abs_t_sq*abs_t; if (abs_t<2) return 4 - 8*abs_t +5*abs_t_sq - abs_t_sq*abs_t; return 0; } /** * Generalized cubic kernel (for a=-1 it is the same as BicubicKernel): \verbatim / | (a+2)|t|**3 - (a+3)|t|**2 + 1 , |t| <= 1 h(t) = | a|t|**3 - 5a|t|**2 + 8a|t| - 4a , 1 < |t| <= 2 | 0 , otherwise \ \endverbatim * Often used values for a are -1 and -1/2. */ static double filter_generalized_cubic(const double t) { const double a = -DEFAULT_FILTER_GENERALIZED_CUBIC; double abs_t = (double)fabs(t); double abs_t_sq = abs_t * abs_t; if (abs_t < 1) return (a + 2) * abs_t_sq * abs_t - (a + 3) * abs_t_sq + 1; if (abs_t < 2) return a * abs_t_sq * abs_t - 5 * a * abs_t_sq + 8 * a * abs_t - 4 * a; return 0; } #ifdef FUNCTION_NOT_USED_YET /* CubicSpline filter, default radius 2 */ static double filter_cubic_spline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0 ) { const double x2 = x*x; return (0.5 * x2 * x - x2 + 2.0 / 3.0); } if (x < 2.0) { return (pow(2.0 - x, 3.0)/6.0); } return 0; } #endif #ifdef FUNCTION_NOT_USED_YET /* CubicConvolution filter, default radius 3 */ static double filter_cubic_convolution(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; const double x2 = x1 * x1; const double x2_x = x2 * x; if (x <= 1.0) return ((4.0 / 3.0)* x2_x - (7.0 / 3.0) * x2 + 1.0); if (x <= 2.0) return (- (7.0 / 12.0) * x2_x + 3 * x2 - (59.0 / 12.0) * x + 2.5); if (x <= 3.0) return ( (1.0/12.0) * x2_x - (2.0 / 3.0) * x2 + 1.75 * x - 1.5); return 0; } #endif static double filter_box(double x) { if (x < - DEFAULT_FILTER_BOX) return 0.0f; if (x < DEFAULT_FILTER_BOX) return 1.0f; return 0.0f; } static double filter_catmullrom(const double x) { if (x < -2.0) return(0.0f); if (x < -1.0) return(0.5f*(4.0f+x*(8.0f+x*(5.0f+x)))); if (x < 0.0) return(0.5f*(2.0f+x*x*(-5.0f-3.0f*x))); if (x < 1.0) return(0.5f*(2.0f+x*x*(-5.0f+3.0f*x))); if (x < 2.0) return(0.5f*(4.0f+x*(-8.0f+x*(5.0f-x)))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET static double filter_filter(double t) { /* f(t) = 2|t|^3 - 3|t|^2 + 1, -1 <= t <= 1 */ if(t < 0.0) t = -t; if(t < 1.0) return((2.0 * t - 3.0) * t * t + 1.0); return(0.0); } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos8 filter, default radius 8 */ static double filter_lanczos8(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS8_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI/ R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos3 filter, default radius 3 */ static double filter_lanczos3(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS3_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI / R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif /* Hermite filter, default radius 1 */ static double filter_hermite(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0) return ((2.0 * x - 3) * x * x + 1.0 ); return 0.0; } /* Trangle filter, default radius 1 */ static double filter_triangle(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; if (x < 1.0) return (1.0 - x); return 0.0; } /* Bell filter, default radius 1.5 */ static double filter_bell(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 0.5) return (0.75 - x*x); if (x < 1.5) return (0.5 * pow(x - 1.5, 2.0)); return 0.0; } /* Mitchell filter, default radius 2.0 */ static double filter_mitchell(const double x) { #define KM_B (1.0f/3.0f) #define KM_C (1.0f/3.0f) #define KM_P0 (( 6.0f - 2.0f * KM_B ) / 6.0f) #define KM_P2 ((-18.0f + 12.0f * KM_B + 6.0f * KM_C) / 6.0f) #define KM_P3 (( 12.0f - 9.0f * KM_B - 6.0f * KM_C) / 6.0f) #define KM_Q0 (( 8.0f * KM_B + 24.0f * KM_C) / 6.0f) #define KM_Q1 ((-12.0f * KM_B - 48.0f * KM_C) / 6.0f) #define KM_Q2 (( 6.0f * KM_B + 30.0f * KM_C) / 6.0f) #define KM_Q3 (( -1.0f * KM_B - 6.0f * KM_C) / 6.0f) if (x < -2.0) return(0.0f); if (x < -1.0) return(KM_Q0-x*(KM_Q1-x*(KM_Q2-x*KM_Q3))); if (x < 0.0f) return(KM_P0+x*x*(KM_P2-x*KM_P3)); if (x < 1.0f) return(KM_P0+x*x*(KM_P2+x*KM_P3)); if (x < 2.0f) return(KM_Q0+x*(KM_Q1+x*(KM_Q2+x*KM_Q3))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET /* Cosine filter, default radius 1 */ static double filter_cosine(const double x) { if ((x >= -1.0) && (x <= 1.0)) return ((cos(x * M_PI) + 1.0)/2.0); return 0; } #endif /* Quadratic filter, default radius 1.5 */ static double filter_quadratic(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; if (x <= 0.5) return (- 2.0 * x * x + 1); if (x <= 1.5) return (x * x - 2.5* x + 1.5); return 0.0; } static double filter_bspline(const double x) { if (x>2.0f) { return 0.0f; } else { double a, b, c, d; /* Was calculated anyway cause the "if((x-1.0f) < 0)" */ const double xm1 = x - 1.0f; const double xp1 = x + 1.0f; const double xp2 = x + 2.0f; if ((xp2) <= 0.0f) a = 0.0f; else a = xp2*xp2*xp2; if ((xp1) <= 0.0f) b = 0.0f; else b = xp1*xp1*xp1; if (x <= 0) c = 0.0f; else c = x*x*x; if ((xm1) <= 0.0f) d = 0.0f; else d = xm1*xm1*xm1; return (0.16666666666666666667f * (a - (4.0f * b) + (6.0f * c) - (4.0f * d))); } } #ifdef FUNCTION_NOT_USED_YET /* QuadraticBSpline filter, default radius 1.5 */ static double filter_quadratic_bspline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x <= 0.5) return (- x * x + 0.75); if (x <= 1.5) return (0.5 * x * x - 1.5 * x + 1.125); return 0.0; } #endif static double filter_gaussian(const double x) { /* return(exp((double) (-2.0 * x * x)) * sqrt(2.0 / M_PI)); */ return (double)(exp(-2.0f * x * x) * 0.79788456080287f); } static double filter_hanning(const double x) { /* A Cosine windowing function */ return(0.5 + 0.5 * cos(M_PI * x)); } static double filter_hamming(const double x) { /* should be (0.54+0.46*cos(M_PI*(double) x)); but this approximation is sufficient */ if (x < -1.0f) return 0.0f; if (x < 0.0f) return 0.92f*(-2.0f*x-3.0f)*x*x+1.0f; if (x < 1.0f) return 0.92f*(2.0f*x-3.0f)*x*x+1.0f; return 0.0f; } static double filter_power(const double x) { const double a = 2.0f; if (fabs(x)>1) return 0.0f; return (1.0f - (double)fabs(pow(x,a))); } static double filter_sinc(const double x) { /* X-scaled Sinc(x) function. 
*/ if (x == 0.0) return(1.0); return (sin(M_PI * (double) x) / (M_PI * (double) x)); } #ifdef FUNCTION_NOT_USED_YET static double filter_welsh(const double x) { /* Welsh parabolic windowing filter */ if (x < 1.0) return(1 - x*x); return(0.0); } #endif #if defined(_MSC_VER) && !defined(inline) # define inline __inline #endif /* keep it for future usage for affine copy over an existing image, targetting fix for 2.2.2 */ #ifdef FUNCTION_NOT_USED_YET /* Copied from upstream's libgd */ static inline int _color_blend (const int dst, const int src) { const int src_alpha = gdTrueColorGetAlpha(src); if( src_alpha == gdAlphaOpaque ) { return src; } else { const int dst_alpha = gdTrueColorGetAlpha(dst); if( src_alpha == gdAlphaTransparent ) return dst; if( dst_alpha == gdAlphaTransparent ) { return src; } else { register int alpha, red, green, blue; const int src_weight = gdAlphaTransparent - src_alpha; const int dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax; const int tot_weight = src_weight + dst_weight; alpha = src_alpha * dst_alpha / gdAlphaMax; red = (gdTrueColorGetRed(src) * src_weight + gdTrueColorGetRed(dst) * dst_weight) / tot_weight; green = (gdTrueColorGetGreen(src) * src_weight + gdTrueColorGetGreen(dst) * dst_weight) / tot_weight; blue = (gdTrueColorGetBlue(src) * src_weight + gdTrueColorGetBlue(dst) * dst_weight) / tot_weight; return ((alpha << 24) + (red << 16) + (green << 8) + blue); } } } static inline int _setEdgePixel(const gdImagePtr src, unsigned int x, unsigned int y, gdFixed coverage, const int bgColor) { const gdFixed f_127 = gd_itofx(127); register int c = src->tpixels[y][x]; c = c | (( (int) (gd_fxtof(gd_mulfx(coverage, f_127)) + 50.5f)) << 24); return _color_blend(bgColor, c); } #endif static inline int getPixelOverflowTC(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? 
gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return c; } else { return bgColor; } } #define colorIndex2RGBA(c) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(c)]) #define colorIndex2RGBcustomA(c, a) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(a)]) static inline int getPixelOverflowPalette(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->pixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return colorIndex2RGBA(c); } else { return bgColor; } } static int getPixelInterpolateWeight(gdImagePtr im, const double x, const double y, const int bgColor) { /* Closest pixel <= (xf,yf) */ int sx = (int)(x); int sy = (int)(y); const double xf = x - (double)sx; const double yf = y - (double)sy; const double nxf = (double) 1.0 - xf; const double nyf = (double) 1.0 - yf; const double m1 = xf * yf; const double m2 = nxf * yf; const double m3 = xf * nyf; const double m4 = nxf * nyf; /* get color values of neighbouring pixels */ const int c1 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy, bgColor) : getPixelOverflowPalette(im, sx, sy, bgColor); const int c2 = im->trueColor == 1 ? getPixelOverflowTC(im, sx - 1, sy, bgColor) : getPixelOverflowPalette(im, sx - 1, sy, bgColor); const int c3 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); const int c4 = im->trueColor == 1 ? 
getPixelOverflowTC(im, sx - 1, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); int r, g, b, a; if (x < 0) sx--; if (y < 0) sy--; /* component-wise summing-up of color values */ if (im->trueColor) { r = (int)(m1*gdTrueColorGetRed(c1) + m2*gdTrueColorGetRed(c2) + m3*gdTrueColorGetRed(c3) + m4*gdTrueColorGetRed(c4)); g = (int)(m1*gdTrueColorGetGreen(c1) + m2*gdTrueColorGetGreen(c2) + m3*gdTrueColorGetGreen(c3) + m4*gdTrueColorGetGreen(c4)); b = (int)(m1*gdTrueColorGetBlue(c1) + m2*gdTrueColorGetBlue(c2) + m3*gdTrueColorGetBlue(c3) + m4*gdTrueColorGetBlue(c4)); a = (int)(m1*gdTrueColorGetAlpha(c1) + m2*gdTrueColorGetAlpha(c2) + m3*gdTrueColorGetAlpha(c3) + m4*gdTrueColorGetAlpha(c4)); } else { r = (int)(m1*im->red[(c1)] + m2*im->red[(c2)] + m3*im->red[(c3)] + m4*im->red[(c4)]); g = (int)(m1*im->green[(c1)] + m2*im->green[(c2)] + m3*im->green[(c3)] + m4*im->green[(c4)]); b = (int)(m1*im->blue[(c1)] + m2*im->blue[(c2)] + m3*im->blue[(c3)] + m4*im->blue[(c4)]); a = (int)(m1*im->alpha[(c1)] + m2*im->alpha[(c2)] + m3*im->alpha[(c3)] + m4*im->alpha[(c4)]); } r = CLAMP(r, 0, 255); g = CLAMP(g, 0, 255); b = CLAMP(b, 0, 255); a = CLAMP(a, 0, gdAlphaMax); return gdTrueColorAlpha(r, g, b, a); } /** * InternalFunction: getPixelInterpolated * Returns the interpolated color value using the default interpolation * method. The returned color is always in the ARGB format (truecolor). 
* * Parameters: * im - Image to set the default interpolation method * y - X value of the ideal position * y - Y value of the ideal position * method - Interpolation method <gdInterpolationMethod> * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE * * See also: * <gdSetInterpolationMethod> */ int getPixelInterpolated(gdImagePtr im, const double x, const double y, const int bgColor) { const int xi=(int)(x); const int yi=(int)(y); int yii; int i; double kernel, kernel_cache_y; double kernel_x[12], kernel_y[4]; double new_r = 0.0f, new_g = 0.0f, new_b = 0.0f, new_a = 0.0f; /* These methods use special implementations */ if (im->interpolation_id == GD_NEAREST_NEIGHBOUR) { return -1; } if (im->interpolation_id == GD_WEIGHTED4) { return getPixelInterpolateWeight(im, x, y, bgColor); } if (im->interpolation_id == GD_NEAREST_NEIGHBOUR) { if (im->trueColor == 1) { return getPixelOverflowTC(im, xi, yi, bgColor); } else { return getPixelOverflowPalette(im, xi, yi, bgColor); } } if (im->interpolation) { for (i=0; i<4; i++) { kernel_x[i] = (double) im->interpolation((double)(xi+i-1-x)); kernel_y[i] = (double) im->interpolation((double)(yi+i-1-y)); } } else { return -1; } /* * TODO: use the known fast rgba multiplication implementation once * the new formats are in place */ for (yii = yi-1; yii < yi+3; yii++) { int xii; kernel_cache_y = kernel_y[yii-(yi-1)]; if (im->trueColor) { for (xii=xi-1; xii<xi+3; xii++) { const int rgbs = getPixelOverflowTC(im, xii, yii, bgColor); kernel = kernel_cache_y * kernel_x[xii-(xi-1)]; new_r += kernel * gdTrueColorGetRed(rgbs); new_g += kernel * gdTrueColorGetGreen(rgbs); new_b += kernel * gdTrueColorGetBlue(rgbs); new_a += kernel * gdTrueColorGetAlpha(rgbs); } } else { for (xii=xi-1; xii<xi+3; xii++) { const int rgbs = getPixelOverflowPalette(im, xii, yii, bgColor); kernel = kernel_cache_y * kernel_x[xii-(xi-1)]; new_r += kernel * gdTrueColorGetRed(rgbs); new_g += kernel * gdTrueColorGetGreen(rgbs); new_b += kernel * 
gdTrueColorGetBlue(rgbs); new_a += kernel * gdTrueColorGetAlpha(rgbs); } } } new_r = CLAMP(new_r, 0, 255); new_g = CLAMP(new_g, 0, 255); new_b = CLAMP(new_b, 0, 255); new_a = CLAMP(new_a, 0, gdAlphaMax); return gdTrueColorAlpha(((int)new_r), ((int)new_g), ((int)new_b), ((int)new_a)); } static inline LineContribType * _gdContributionsAlloc(unsigned int line_length, unsigned int windows_size) { unsigned int u = 0; LineContribType *res; int overflow_error = 0; res = (LineContribType *) gdMalloc(sizeof(LineContribType)); if (!res) { return NULL; } res->WindowSize = windows_size; res->LineLength = line_length; if (overflow2(line_length, sizeof(ContributionType))) { gdFree(res); return NULL; } res->ContribRow = (ContributionType *) gdMalloc(line_length * sizeof(ContributionType)); if (res->ContribRow == NULL) { gdFree(res); return NULL; } for (u = 0 ; u < line_length ; u++) { if (overflow2(windows_size, sizeof(double))) { overflow_error = 1; } else { res->ContribRow[u].Weights = (double *) gdMalloc(windows_size * sizeof(double)); } if (overflow_error == 1 || res->ContribRow[u].Weights == NULL) { unsigned int i; u--; for (i=0;i<=u;i++) { gdFree(res->ContribRow[i].Weights); } gdFree(res->ContribRow); gdFree(res); return NULL; } } return res; } static inline void _gdContributionsFree(LineContribType * p) { unsigned int u; for (u = 0; u < p->LineLength; u++) { gdFree(p->ContribRow[u].Weights); } gdFree(p->ContribRow); gdFree(p); } static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter) { double width_d; double scale_f_d = 1.0; const double filter_width_d = DEFAULT_BOX_RADIUS; int windows_size; unsigned int u; LineContribType *res; if (scale_d < 1.0) { width_d = filter_width_d / scale_d; scale_f_d = scale_d; } else { width_d= filter_width_d; } windows_size = 2 * (int)ceil(width_d) + 1; res = _gdContributionsAlloc(line_size, windows_size); if (res == NULL) { return NULL; } for (u = 
0; u < line_size; u++) { const double dCenter = (double)u / scale_d; /* get the significant edge points affecting the pixel */ register int iLeft = MAX(0, (int)floor (dCenter - width_d)); int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1); double dTotalWeight = 0.0; int iSrc; /* Cut edge points to fit in filter window in case of spill-off */ if (iRight - iLeft + 1 > windows_size) { if (iLeft < ((int)src_size - 1 / 2)) { iLeft++; } else { iRight--; } } res->ContribRow[u].Left = iLeft; res->ContribRow[u].Right = iRight; for (iSrc = iLeft; iSrc <= iRight; iSrc++) { dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc))); } if (dTotalWeight < 0.0) { _gdContributionsFree(res); return NULL; } if (dTotalWeight > 0.0) { for (iSrc = iLeft; iSrc <= iRight; iSrc++) { res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight; } } } return res; } static inline void _gdScaleOneAxis(gdImagePtr pSrc, gdImagePtr dst, unsigned int dst_len, unsigned int row, LineContribType *contrib, gdAxis axis) { unsigned int ndx; for (ndx = 0; ndx < dst_len; ndx++) { double r = 0, g = 0, b = 0, a = 0; const int left = contrib->ContribRow[ndx].Left; const int right = contrib->ContribRow[ndx].Right; int *dest = (axis == HORIZONTAL) ? &dst->tpixels[row][ndx] : &dst->tpixels[ndx][row]; int i; /* Accumulate each channel */ for (i = left; i <= right; i++) { const int left_channel = i - left; const int srcpx = (axis == HORIZONTAL) ? 
pSrc->tpixels[row][i] : pSrc->tpixels[i][row]; r += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetRed(srcpx)); g += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetGreen(srcpx)); b += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetBlue(srcpx)); a += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetAlpha(srcpx)); }/* for */ *dest = gdTrueColorAlpha(uchar_clamp(r, 0xFF), uchar_clamp(g, 0xFF), uchar_clamp(b, 0xFF), uchar_clamp(a, 0x7F)); /* alpha is 0..127 */ }/* for */ }/* _gdScaleOneAxis*/ static inline int _gdScalePass(const gdImagePtr pSrc, const unsigned int src_len, const gdImagePtr pDst, const unsigned int dst_len, const unsigned int num_lines, const gdAxis axis) { unsigned int line_ndx; LineContribType * contrib; /* Same dim, just copy it. */ assert(dst_len != src_len); // TODO: caller should handle this. contrib = _gdContributionsCalc(dst_len, src_len, (double)dst_len / (double)src_len, pSrc->interpolation); if (contrib == NULL) { return 0; } /* Scale each line */ for (line_ndx = 0; line_ndx < num_lines; line_ndx++) { _gdScaleOneAxis(pSrc, pDst, dst_len, line_ndx, contrib, axis); } _gdContributionsFree (contrib); return 1; }/* _gdScalePass*/ static gdImagePtr gdImageScaleTwoPass(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { const unsigned int src_width = src->sx; const unsigned int src_height = src->sy; gdImagePtr tmp_im = NULL; gdImagePtr dst = NULL; int scale_pass_res; assert(src != NULL); /* First, handle the trivial case. */ if (src_width == new_width && src_height == new_height) { return gdImageClone(src); }/* if */ /* Convert to truecolor if it isn't; this code requires it. */ if (!src->trueColor) { gdImagePaletteToTrueColor(src); }/* if */ /* Scale horizontally unless sizes are the same. 
*/ if (src_width == new_width) { tmp_im = src; } else { tmp_im = gdImageCreateTrueColor(new_width, src_height); if (tmp_im == NULL) { return NULL; } gdImageSetInterpolationMethod(tmp_im, src->interpolation_id); scale_pass_res = _gdScalePass(src, src_width, tmp_im, new_width, src_height, HORIZONTAL); if (scale_pass_res != 1) { gdImageDestroy(tmp_im); return NULL; } }/* if .. else*/ /* If vertical sizes match, we're done. */ if (src_height == new_height) { assert(tmp_im != src); return tmp_im; }/* if */ /* Otherwise, we need to scale vertically. */ dst = gdImageCreateTrueColor(new_width, new_height); if (dst != NULL) { gdImageSetInterpolationMethod(dst, src->interpolation_id); scale_pass_res = _gdScalePass(tmp_im, src_height, dst, new_height, new_width, VERTICAL); if (scale_pass_res != 1) { gdImageDestroy(dst); if (src != tmp_im) { gdImageDestroy(tmp_im); } return NULL; } }/* if */ if (src != tmp_im) { gdImageDestroy(tmp_im); }/* if */ return dst; }/* gdImageScaleTwoPass*/ /* BilinearFixed, BicubicFixed and nearest implementations are rewamped versions of the implementation in CBitmapEx http://www.codeproject.com/Articles/29121/CBitmapEx-Free-C-Bitmap-Manipulation-Class Integer only implementation, good to have for common usages like pre scale very large images before using another interpolation methods for the last step. 
*/ static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height) { const unsigned long new_width = MAX(1, width); const unsigned long new_height = MAX(1, height); const float dx = (float)im->sx / (float)new_width; const float dy = (float)im->sy / (float)new_height; const gdFixed f_dx = gd_ftofx(dx); const gdFixed f_dy = gd_ftofx(dy); gdImagePtr dst_img; unsigned long dst_offset_x; unsigned long dst_offset_y = 0; unsigned int i; dst_img = gdImageCreateTrueColor(new_width, new_height); if (dst_img == NULL) { return NULL; } for (i=0; i<new_height; i++) { unsigned int j; dst_offset_x = 0; if (im->trueColor) { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = im->tpixels[m][n]; } } else { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = colorIndex2RGBA(im->pixels[m][n]); } } dst_offset_y++; } return dst_img; } #if 0 static inline int getPixelOverflowColorTC(gdImagePtr im, const int x, const int y, const int color) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } return c; } else { register int border = 0; if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y > im->cy2) { if (x >= im->cx1 && x <= im->cx1) { border = im->tpixels[im->cy2][x]; goto processborder; } else { return gdTrueColorAlpha(0, 0, 0, 127); } } /* y is bound safe at this point */ if (x < im->cx1) { 
border = im->tpixels[y][im->cx1]; goto processborder; } if (x > im->cx2) { border = im->tpixels[y][im->cx2]; } processborder: if (border == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } else{ return gdTrueColorAlpha(gdTrueColorGetRed(border), gdTrueColorGetGreen(border), gdTrueColorGetBlue(border), 127); } } } #endif static gdImagePtr gdImageScaleBilinearPalette(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long _width = MAX(1, new_width); long _height = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)_width; float dy = (float)gdImageSY(im) / (float)_height; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int dst_offset_v = 0; long i; gdImagePtr new_img; const int transparent = im->transparent; new_img = gdImageCreateTrueColor(new_width, new_height); if (new_img == NULL) { return NULL; } if (transparent < 0) { /* uninitialized */ new_img->transparent = -1; } else { new_img->transparent = gdTrueColorAlpha(im->red[transparent], im->green[transparent], im->blue[transparent], im->alpha[transparent]); } for (i=0; i < _height; i++) { long j; const gdFixed f_i = gd_itofx(i); const gdFixed f_a = gd_mulfx(f_i, f_dy); register long m = gd_fxtoi(f_a); dst_offset_h = 0; for (j=0; j < _width; j++) { /* Update bitmap */ gdFixed f_j = gd_itofx(j); gdFixed f_b = gd_mulfx(f_j, f_dx); const long n = gd_fxtoi(f_b); gdFixed f_f = f_a - gd_itofx(m); gdFixed f_g = f_b - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); unsigned int pixel1; unsigned int pixel2; unsigned int pixel3; unsigned int pixel4; register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4; /* 0 for bgColor; (n,m) is supposed to be valid anyway */ pixel1 = getPixelOverflowPalette(im, n, m, 0); pixel2 = 
getPixelOverflowPalette(im, n + 1, m, pixel1); pixel3 = getPixelOverflowPalette(im, n, m + 1, pixel1); pixel4 = getPixelOverflowPalette(im, n + 1, m + 1, pixel1); f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); { const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4)); const unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4)); const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4)); const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4)); new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha); } dst_offset_h++; } dst_offset_v++; } return new_img; } static gdImagePtr gdImageScaleBilinearTC(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long dst_w = MAX(1, new_width); long dst_h = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)dst_w; float dy = (float)gdImageSY(im) / (float)dst_h; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int 
dst_offset_v = 0;
	long i;
	gdImagePtr new_img;

	new_img = gdImageCreateTrueColor(new_width, new_height);
	if (!new_img){
		return NULL;
	}

	/* For every destination pixel, blend the four surrounding source pixels
	   with bilinear weights computed in fixed-point arithmetic. */
	for (i=0; i < dst_h; i++) {
		long j;
		dst_offset_h = 0;
		for (j=0; j < dst_w; j++) {
			/* Update bitmap */
			gdFixed f_i = gd_itofx(i);
			gdFixed f_j = gd_itofx(j);
			/* back-project destination (j,i) into source space */
			gdFixed f_a = gd_mulfx(f_i, f_dy);
			gdFixed f_b = gd_mulfx(f_j, f_dx);
			const gdFixed m = gd_fxtoi(f_a);
			const gdFixed n = gd_fxtoi(f_b);
			/* fractional position inside the source cell */
			gdFixed f_f = f_a - gd_itofx(m);
			gdFixed f_g = f_b - gd_itofx(n);
			/* bilinear weights of the four neighbouring source pixels */
			const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g);
			const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g);
			const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g);
			const gdFixed f_w4 = gd_mulfx(f_f, f_g);
			unsigned int pixel1;
			unsigned int pixel2;
			unsigned int pixel3;
			unsigned int pixel4;
			register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4;

			/* 0 for bgColor; (n,m) is supposed to be valid anyway */
			pixel1 = getPixelOverflowTC(im, n, m, 0);
			/* out-of-range neighbours fall back to the base pixel's colour */
			pixel2 = getPixelOverflowTC(im, n + 1, m, pixel1);
			pixel3 = getPixelOverflowTC(im, n, m + 1, pixel1);
			pixel4 = getPixelOverflowTC(im, n + 1, m + 1, pixel1);

			/* split each neighbour into fixed-point R/G/B/A components */
			f_r1 = gd_itofx(gdTrueColorGetRed(pixel1));
			f_r2 = gd_itofx(gdTrueColorGetRed(pixel2));
			f_r3 = gd_itofx(gdTrueColorGetRed(pixel3));
			f_r4 = gd_itofx(gdTrueColorGetRed(pixel4));
			f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1));
			f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2));
			f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3));
			f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4));
			f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1));
			f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2));
			f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3));
			f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4));
			f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1));
			f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2));
			f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3));
			f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4));
			{
				/* weighted blend of the four neighbours, per channel */
				const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4));
				const unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4));
				const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4));
				const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4));

				new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha);
			}

			dst_offset_h++;
		}

		dst_offset_v++;
	}
	return new_img;
}

/* Bilinear scaling entry point: dispatches to the truecolor or the
 * palette implementation depending on the source image type. */
static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height)
{
	if (im->trueColor) {
		return gdImageScaleBilinearTC(im, new_width, new_height);
	} else {
		return gdImageScaleBilinearPalette(im, new_width, new_height);
	}
}

/* Fixed-point bicubic scaling. Width/height of 0 are clamped to 1.
 * Operates on truecolor data; palette sources are converted in place. */
static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height)
{
	const long new_width = MAX(1, width);
	const long new_height = MAX(1, height);
	const int src_w = gdImageSX(src);
	const int src_h = gdImageSY(src);
	/* per-axis source step for one destination pixel */
	const gdFixed f_dx = gd_ftofx((float)src_w / (float)new_width);
	const gdFixed f_dy = gd_ftofx((float)src_h / (float)new_height);
	/* small fixed-point constants used by the cubic B-spline kernel */
	const gdFixed f_1 = gd_itofx(1);
	const gdFixed f_2 = gd_itofx(2);
	const gdFixed f_4 = gd_itofx(4);
	const gdFixed f_6 = gd_itofx(6);
	const gdFixed f_gamma = gd_ftofx(1.04f);
	gdImagePtr dst;
	unsigned int dst_offset_x;
	unsigned int dst_offset_y = 0;
	long i;

	/* impact perf a bit, but not that much. Implementation for palette
	   images can be done at a later point.
*/
	/* bicubic needs truecolor pixel access; convert palette sources in place */
	if (src->trueColor == 0) {
		gdImagePaletteToTrueColor(src);
	}

	dst = gdImageCreateTrueColor(new_width, new_height);
	if (!dst) {
		return NULL;
	}
	dst->saveAlphaFlag = 1;

	for (i=0; i < new_height; i++) {
		long j;
		dst_offset_x = 0;

		for (j=0; j < new_width; j++) {
			/* back-project (j,i) into source coordinates */
			const gdFixed f_a = gd_mulfx(gd_itofx(i), f_dy);
			const gdFixed f_b = gd_mulfx(gd_itofx(j), f_dx);
			const long m = gd_fxtoi(f_a);
			const long n = gd_fxtoi(f_b);
			/* fractional part of the source position */
			const gdFixed f_f = f_a - gd_itofx(m);
			const gdFixed f_g = f_b - gd_itofx(n);
			/* 4x4 neighbourhood, edge-clamped below */
			unsigned int src_offset_x[16], src_offset_y[16];
			long k;
			register gdFixed f_red = 0, f_green = 0, f_blue = 0, f_alpha = 0;
			unsigned char red, green, blue, alpha = 0;
			int *dst_row = dst->tpixels[dst_offset_y];

			/* Build the 16 (x,y) sample positions; each branch clamps a
			   neighbour that would fall outside the source image back to
			   the base pixel (n,m). Rows of 4: top (0-3), centre (4-7),
			   below (8-11), bottom (12-15). */
			if ((m < 1) || (n < 1)) {
				src_offset_x[0] = n;
				src_offset_y[0] = m;
			} else {
				src_offset_x[0] = n - 1;
				src_offset_y[0] = m;
			}

			src_offset_x[1] = n;
			src_offset_y[1] = m;

			if ((m < 1) || (n >= src_w - 1)) {
				src_offset_x[2] = n;
				src_offset_y[2] = m;
			} else {
				src_offset_x[2] = n + 1;
				src_offset_y[2] = m;
			}

			if ((m < 1) || (n >= src_w - 2)) {
				src_offset_x[3] = n;
				src_offset_y[3] = m;
			} else {
				src_offset_x[3] = n + 1 + 1;
				src_offset_y[3] = m;
			}

			if (n < 1) {
				src_offset_x[4] = n;
				src_offset_y[4] = m;
			} else {
				src_offset_x[4] = n - 1;
				src_offset_y[4] = m;
			}

			src_offset_x[5] = n;
			src_offset_y[5] = m;

			if (n >= src_w-1) {
				src_offset_x[6] = n;
				src_offset_y[6] = m;
			} else {
				src_offset_x[6] = n + 1;
				src_offset_y[6] = m;
			}

			if (n >= src_w - 2) {
				src_offset_x[7] = n;
				src_offset_y[7] = m;
			} else {
				src_offset_x[7] = n + 1 + 1;
				src_offset_y[7] = m;
			}

			if ((m >= src_h - 1) || (n < 1)) {
				src_offset_x[8] = n;
				src_offset_y[8] = m;
			} else {
				src_offset_x[8] = n - 1;
				src_offset_y[8] = m;
			}

			src_offset_x[9] = n;
			src_offset_y[9] = m;

			if ((m >= src_h-1) || (n >= src_w-1)) {
				src_offset_x[10] = n;
				src_offset_y[10] = m;
			} else {
				src_offset_x[10] = n + 1;
				src_offset_y[10] = m;
			}

			if ((m >= src_h - 1) || (n >= src_w - 2)) {
				src_offset_x[11] = n;
				src_offset_y[11] = m;
			} else {
				src_offset_x[11] = n + 1 + 1;
				src_offset_y[11] = m;
			}

			if ((m >= src_h - 2) || (n < 1)) {
				src_offset_x[12] = n;
				src_offset_y[12] = m;
			} else {
				src_offset_x[12] = n - 1;
				src_offset_y[12] = m;
			}

			/* NOTE(review): unlike the other 15 entries, slot [13] is left
			   UNINITIALIZED when m >= src_h - 2 (no else branch), which
			   would index tpixels with garbage on the bottom two rows —
			   looks like a defect; confirm against upstream libgd. */
			if (!(m >= src_h - 2)) {
				src_offset_x[13] = n;
				src_offset_y[13] = m;
			}

			if ((m >= src_h - 2) || (n >= src_w - 1)) {
				src_offset_x[14] = n;
				src_offset_y[14] = m;
			} else {
				src_offset_x[14] = n + 1;
				src_offset_y[14] = m;
			}

			if ((m >= src_h - 2) || (n >= src_w - 2)) {
				src_offset_x[15] = n;
				src_offset_y[15] = m;
			} else {
				src_offset_x[15] = n + 1 + 1;
				src_offset_y[15] = m;
			}

			/* Accumulate the cubic B-spline kernel over the 4x4 window:
			   k walks the vertical taps, l the horizontal taps. */
			for (k = -1; k < 3; k++) {
				const gdFixed f = gd_itofx(k)-f_f;
				const gdFixed f_fm1 = f - f_1;
				const gdFixed f_fp1 = f + f_1;
				const gdFixed f_fp2 = f + f_2;
				register gdFixed f_a = 0, f_b = 0, f_d = 0, f_c = 0;
				register gdFixed f_RY;
				int l;

				/* positive parts of (f+2)^3, (f+1)^3, f^3, (f-1)^3 */
				if (f_fp2 > 0) f_a = gd_mulfx(f_fp2, gd_mulfx(f_fp2,f_fp2));
				if (f_fp1 > 0) f_b = gd_mulfx(f_fp1, gd_mulfx(f_fp1,f_fp1));
				if (f > 0) f_c = gd_mulfx(f, gd_mulfx(f,f));
				if (f_fm1 > 0) f_d = gd_mulfx(f_fm1, gd_mulfx(f_fm1,f_fm1));

				/* vertical kernel weight */
				f_RY = gd_divfx((f_a - gd_mulfx(f_4,f_b) + gd_mulfx(f_6,f_c) - gd_mulfx(f_4,f_d)),f_6);

				for (l = -1; l < 3; l++) {
					const gdFixed f = gd_itofx(l) - f_g;
					const gdFixed f_fm1 = f - f_1;
					const gdFixed f_fp1 = f + f_1;
					const gdFixed f_fp2 = f + f_2;
					register gdFixed f_a = 0, f_b = 0, f_c = 0, f_d = 0;
					register gdFixed f_RX, f_R, f_rs, f_gs, f_bs, f_ba;
					register int c;
					/* linear index into the 16-entry offset tables */
					const int _k = ((k+1)*4) + (l+1);

					if (f_fp2 > 0) f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2));
					if (f_fp1 > 0) f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1));
					if (f > 0) f_c = gd_mulfx(f,gd_mulfx(f,f));
					if (f_fm1 > 0) f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1));

					/* horizontal kernel weight, then combined 2-D weight */
					f_RX = gd_divfx((f_a-gd_mulfx(f_4,f_b)+gd_mulfx(f_6,f_c)-gd_mulfx(f_4,f_d)),f_6);
					f_R = gd_mulfx(f_RY,f_RX);

					c = src->tpixels[*(src_offset_y + _k)][*(src_offset_x + _k)];
					f_rs = gd_itofx(gdTrueColorGetRed(c));
					f_gs = gd_itofx(gdTrueColorGetGreen(c));
					f_bs = gd_itofx(gdTrueColorGetBlue(c));
					f_ba = gd_itofx(gdTrueColorGetAlpha(c));

					f_red += gd_mulfx(f_rs,f_R);
					f_green +=
gd_mulfx(f_gs,f_R); f_blue += gd_mulfx(f_bs,f_R); f_alpha += gd_mulfx(f_ba,f_R); } } red = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_red, f_gamma)), 0, 255); green = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_green, f_gamma)), 0, 255); blue = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_blue, f_gamma)), 0, 255); alpha = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_alpha, f_gamma)), 0, 127); *(dst_row + dst_offset_x) = gdTrueColorAlpha(red, green, blue, alpha); dst_offset_x++; } dst_offset_y++; } return dst; } /** * Function: gdImageScale * * Scale an image * * Creates a new image, scaled to the requested size using the current * <gdInterpolationMethod>. * * Note that GD_WEIGHTED4 is not yet supported by this function. * * Parameters: * src - The source image. * new_width - The new width. * new_height - The new height. * * Returns: * The scaled image on success, NULL on failure. * * See also: * - <gdImageCopyResized> * - <gdImageCopyResampled> */ BGD_DECLARE(gdImagePtr) gdImageScale(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { gdImagePtr im_scaled = NULL; if (src == NULL || (uintmax_t)src->interpolation_id >= GD_METHOD_COUNT) { return NULL; } if (new_width == 0 || new_height == 0) { return NULL; } if (new_width == gdImageSX(src) && new_height == gdImageSY(src)) { return gdImageClone(src); } switch (src->interpolation_id) { /*Special cases, optimized implementations */ case GD_NEAREST_NEIGHBOUR: im_scaled = gdImageScaleNearestNeighbour(src, new_width, new_height); break; case GD_BILINEAR_FIXED: case GD_LINEAR: im_scaled = gdImageScaleBilinear(src, new_width, new_height); break; case GD_BICUBIC_FIXED: case GD_BICUBIC: im_scaled = gdImageScaleBicubicFixed(src, new_width, new_height); break; /* generic */ default: if (src->interpolation == NULL) { return NULL; } im_scaled = gdImageScaleTwoPass(src, new_width, new_height); break; } return im_scaled; } static int gdRotatedImageSize(gdImagePtr src, const float angle, gdRectPtr bbox) { 
gdRect src_area; double m[6]; gdAffineRotate(m, angle); src_area.x = 0; src_area.y = 0; src_area.width = gdImageSX(src); src_area.height = gdImageSY(src); if (gdTransformAffineBoundingBox(&src_area, m, bbox) != GD_TRUE) { return GD_FALSE; } return GD_TRUE; } static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; gdRect bbox; int new_height, new_width; gdRotatedImageSize(src, degrees, &bbox); new_width = bbox.width; new_height = bbox.height; dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j = 0; j < new_width; j++) { gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; long m = gd_fxtoi(f_m); long n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h-1) && (n > 0) && (n < src_w-1)) { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = src->tpixels[m][n]; } } else { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } } } dst_offset_y++; } return dst; } static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed 
f_W = gd_itofx(src_w/2);
	const gdFixed f_cos = gd_ftofx(cos(-_angle));
	const gdFixed f_sin = gd_ftofx(sin(-_angle));
	unsigned int dst_offset_x;
	unsigned int dst_offset_y = 0;
	unsigned int i;
	gdImagePtr dst;
	int new_width, new_height;
	gdRect bbox;

	if (bgColor < 0) {
		return NULL;
	}

	/* make sure a filter callback is installed for getPixelInterpolated() */
	if (src->interpolation == NULL) {
		gdImageSetInterpolationMethod(src, GD_DEFAULT);
	}

	gdRotatedImageSize(src, degrees, &bbox);
	new_width = bbox.width;
	new_height = bbox.height;

	dst = gdImageCreateTrueColor(new_width, new_height);
	if (!dst) {
		return NULL;
	}
	dst->saveAlphaFlag = 1;

	for (i = 0; i < new_height; i++) {
		unsigned int j;
		dst_offset_x = 0;
		for (j = 0; j < new_width; j++) {
			/* inverse-rotate the destination pixel into source coordinates */
			gdFixed f_i = gd_itofx((int)i - (int)new_height / 2);
			gdFixed f_j = gd_itofx((int)j - (int)new_width / 2);
			gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_H;
			gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_W;
			long m = gd_fxtoi(f_m);
			long n = gd_fxtoi(f_n);

			/* -1 is tolerated so the filter can sample the border */
			if (m < -1 || n < -1 || m >= src_h || n >= src_w ) {
				dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor;
			} else {
				dst->tpixels[dst_offset_y][dst_offset_x++] = getPixelInterpolated(src, gd_fxtod(f_n), gd_fxtod(f_m), bgColor);
			}
		}
		dst_offset_y++;
	}
	return dst;
}

/**
 * Function: gdImageRotateInterpolated
 *
 * Rotate an image
 *
 * Creates a new image, counter-clockwise rotated by the requested angle
 * using the current <gdInterpolationMethod>. Non-square angles will add a
 * border with bgcolor.
 *
 * Parameters:
 *   src     - The source image.
 *   angle   - The angle in degrees.
 *   bgcolor - The color to fill the added background with.
 *
 * Returns:
 *   The rotated image on success, NULL on failure.
 *
 * See also:
 *   - <gdImageCopyRotated>
 */
BGD_DECLARE(gdImagePtr) gdImageRotateInterpolated(const gdImagePtr src, const float angle, int bgcolor)
{
	/* round to two decimals and keep the 100x multiplication to use it in
	   the common square angles case later. Keep the two decimal precisions
	   so smaller rotation steps can be done, useful for slow animations,
	   f.e.
	*/
	const int angle_rounded = fmod((int) floorf(angle * 100), 360 * 100);

	if (src == NULL || bgcolor < 0) {
		return NULL;
	}

	/* impact perf a bit, but not that much. Implementation for palette
	   images can be done at a later point.
	*/
	if (src->trueColor == 0) {
		/* map the palette background index to its truecolor equivalent
		   before the source is converted */
		if (bgcolor < gdMaxColors) {
			bgcolor = gdTrueColorAlpha(src->red[bgcolor], src->green[bgcolor], src->blue[bgcolor], src->alpha[bgcolor]);
		}
		gdImagePaletteToTrueColor(src);
	}

	/* 0 && 90 degrees multiple rotation, 0 rotation simply clones the
	   return image and convert it to truecolor, as we must return truecolor
	   image. */
	switch (angle_rounded) {
		case 0: {
			gdImagePtr dst = gdImageClone(src);

			if (dst == NULL) {
				return NULL;
			}
			if (dst->trueColor == 0) {
				gdImagePaletteToTrueColor(dst);
			}
			return dst;
		}

		case -27000:
		case   9000:
			return gdImageRotate90(src, 0);

		case -18000:
		case  18000:
			return gdImageRotate180(src, 0);

		case  -9000:
		case  27000:
			return gdImageRotate270(src, 0);
	}

	if (src->interpolation_id < 1 || src->interpolation_id > GD_METHOD_COUNT) {
		return NULL;
	}

	switch (src->interpolation_id) {
		case GD_NEAREST_NEIGHBOUR:
			return gdImageRotateNearestNeighbour(src, angle, bgcolor);
			break;

		case GD_BILINEAR_FIXED:
		case GD_BICUBIC_FIXED:
		default:
			return gdImageRotateGeneric(src, angle, bgcolor);
	}
	return NULL;
}

/**
 * Group: Affine Transformation
 **/

/* Clamp rectangle `r` (in place) to the clipping region of `im`. */
static void gdImageClipRectangle(gdImagePtr im, gdRectPtr r)
{
	int c1x, c1y, c2x, c2y;
	int x1,y1;

	gdImageGetClip(im, &c1x, &c1y, &c2x, &c2y);
	x1 = r->x + r->width - 1;
	y1 = r->y + r->height - 1;
	r->x = CLAMP(r->x, c1x, c2x);
	r->y = CLAMP(r->y, c1y, c2y);
	r->width = CLAMP(x1, c1x, c2x) - r->x + 1;
	r->height = CLAMP(y1, c1y, c2y) - r->y + 1;
}

/* Debug helper: print a rectangle's position and size to stdout. */
void gdDumpRect(const char *msg, gdRectPtr r)
{
	printf("%s (%i, %i) (%i, %i)\n", msg, r->x, r->y, r->width, r->height);
}

/**
 * Function: gdTransformAffineGetImage
 *  Applies an affine transformation to a region and return an image
 *  containing the complete transformation.
* * Parameters: * dst - Pointer to a gdImagePtr to store the created image, NULL when * the creation or the transformation failed * src - Source image * src_area - rectangle defining the source region to transform * dstY - Y position in the destination image * affine - The desired affine transformation * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]) { int res; double m[6]; gdRect bbox; gdRect area_full; if (src_area == NULL) { area_full.x = 0; area_full.y = 0; area_full.width = gdImageSX(src); area_full.height = gdImageSY(src); src_area = &area_full; } gdTransformAffineBoundingBox(src_area, affine, &bbox); *dst = gdImageCreateTrueColor(bbox.width, bbox.height); if (*dst == NULL) { return GD_FALSE; } (*dst)->saveAlphaFlag = 1; if (!src->trueColor) { gdImagePaletteToTrueColor(src); } /* Translate to dst origin (0,0) */ gdAffineTranslate(m, -bbox.x, -bbox.y); gdAffineConcat(m, affine, m); gdImageAlphaBlending(*dst, 0); res = gdTransformAffineCopy(*dst, 0,0, src, src_area, m); if (res != GD_TRUE) { gdImageDestroy(*dst); *dst = NULL; return GD_FALSE; } else { return GD_TRUE; } } /** * Function: gdTransformAffineCopy * Applies an affine transformation to a region and copy the result * in a destination to the given position. 
*
 * Parameters:
 * 	dst - Image to draw the transformed image
 *  src - Source image
 *  dstX - X position in the destination image
 *  dstY - Y position in the destination image
 *  src_area - Rectangular region to rotate in the src image
 *
 * Returns:
 *  GD_TRUE if the affine is rectilinear or GD_FALSE
 */
BGD_DECLARE(int) gdTransformAffineCopy(gdImagePtr dst,
		  int dst_x, int dst_y,
		  const gdImagePtr src,
		  gdRectPtr src_region,
		  const double affine[6])
{
	int c1x,c1y,c2x,c2y;
	int backclip = 0;
	int backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2;
	register int x, y, src_offset_x, src_offset_y;
	double inv[6];
	int *dst_p;
	gdPointF pt, src_pt;
	gdRect bbox;
	int end_x, end_y;
	gdInterpolationMethod interpolation_id_bak = GD_DEFAULT;

	/* These methods use special implementations */
	if (src->interpolation_id == GD_BILINEAR_FIXED || src->interpolation_id == GD_BICUBIC_FIXED || src->interpolation_id == GD_NEAREST_NEIGHBOUR) {
		interpolation_id_bak = src->interpolation_id;
		/* temporarily switch to the generic bicubic filter; restored below */
		gdImageSetInterpolationMethod(src, GD_BICUBIC);
	}

	gdImageClipRectangle(src, src_region);

	/* when only a sub-region is copied, narrow the source clip so the
	   interpolator cannot sample outside it; the old clip is restored */
	if (src_region->x > 0 || src_region->y > 0
		|| src_region->width < gdImageSX(src)
		|| src_region->height < gdImageSY(src)) {
		backclip = 1;

		gdImageGetClip(src, &backup_clipx1, &backup_clipy1,
		&backup_clipx2, &backup_clipy2);

		gdImageSetClip(src, src_region->x, src_region->y,
			src_region->x + src_region->width - 1,
			src_region->y + src_region->height - 1);
	}

	if (!gdTransformAffineBoundingBox(src_region, affine, &bbox)) {
		if (backclip) {
			gdImageSetClip(src, backup_clipx1, backup_clipy1,
					backup_clipx2, backup_clipy2);
		}
		gdImageSetInterpolationMethod(src, interpolation_id_bak);
		return GD_FALSE;
	}

	gdImageGetClip(dst, &c1x, &c1y, &c2x, &c2y);

	end_x = bbox.width  + abs(bbox.x);
	end_y = bbox.height + abs(bbox.y);

	/* Get inverse affine to let us work with destination -> source */
	gdAffineInvert(inv, affine);

	src_offset_x =  src_region->x;
	src_offset_y =  src_region->y;

	if (dst->alphaBlendingFlag) {
		/* blending path: write through gdImageSetPixel so alpha composites */
		for (y = bbox.y; y <= end_y; y++) {
			pt.y = y + 0.5;
			for (x = 0; x <= end_x; x++) {
				pt.x = x + 0.5;
				gdAffineApplyToPointF(&src_pt, &pt, inv);
				gdImageSetPixel(dst, dst_x + x, dst_y + y, getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, 0));
			}
		}
	} else {
		/* non-blending path: write rows directly, skipping out-of-range rows */
		for (y = 0; y <= end_y; y++) {
			pt.y = y + 0.5 + bbox.y;
			if ((dst_y + y) < 0 || ((dst_y + y) > gdImageSY(dst) -1)) {
				continue;
			}

			dst_p = dst->tpixels[dst_y + y] + dst_x;

			for (x = 0; x <= end_x; x++) {
				pt.x = x + 0.5 + bbox.x;
				gdAffineApplyToPointF(&src_pt, &pt, inv);

				if ((dst_x + x) < 0 || (dst_x + x) > (gdImageSX(dst) - 1)) {
					break;
				}
				*(dst_p++) = getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, -1);
			}
		}
	}

	/* Restore clip if required */
	if (backclip) {
		gdImageSetClip(src, backup_clipx1, backup_clipy1,
				backup_clipx2, backup_clipy2);
	}

	gdImageSetInterpolationMethod(src, interpolation_id_bak);
	return GD_TRUE;
}

/**
 * Function: gdTransformAffineBoundingBox
 *  Returns the bounding box of an affine transformation applied to a
 *  rectangular area <gdRect>
 *
 * Parameters:
 * 	src - Rectangular source area for the affine transformation
 *  affine - the affine transformation
 *  bbox - the resulting bounding box
 *
 * Returns:
 *  GD_TRUE if the affine is rectilinear or GD_FALSE
 */
BGD_DECLARE(int) gdTransformAffineBoundingBox(gdRectPtr src, const double affine[6], gdRectPtr bbox)
{
	gdPointF extent[4], min, max, point;
	int i;

	/* transform the four corners of the source rectangle */
	extent[0].x=0.0;
	extent[0].y=0.0;
	extent[1].x=(double) src->width;
	extent[1].y=0.0;
	extent[2].x=(double) src->width;
	extent[2].y=(double) src->height;
	extent[3].x=0.0;
	extent[3].y=(double) src->height;

	for (i=0; i < 4; i++) {
		point=extent[i];
		if (gdAffineApplyToPointF(&extent[i], &point, affine) != GD_TRUE) {
			return GD_FALSE;
		}
	}

	/* axis-aligned min/max over the transformed corners */
	min=extent[0];
	max=extent[0];

	for (i=1; i < 4; i++) {
		if (min.x > extent[i].x)
			min.x=extent[i].x;
		if (min.y > extent[i].y)
			min.y=extent[i].y;
		if (max.x < extent[i].x)
			max.x=extent[i].x;
		if (max.y < extent[i].y)
			max.y=extent[i].y;
	}

	bbox->x = (int) min.x;
	bbox->y = (int) min.y;
bbox->width = (int) ceil((max.x - min.x)) + 1; bbox->height = (int) ceil(max.y - min.y) + 1; return GD_TRUE; } /** * Group: Interpolation Method */ /** * Function: gdImageSetInterpolationMethod * * Set the interpolation method for subsequent operations * * Parameters: * im - The image. * id - The interpolation method. * * Returns: * Non-zero on success, zero on failure. * * See also: * - <gdInterpolationMethod> * - <gdImageGetInterpolationMethod> */ BGD_DECLARE(int) gdImageSetInterpolationMethod(gdImagePtr im, gdInterpolationMethod id) { if (im == NULL || (uintmax_t)id > GD_METHOD_COUNT) { return 0; } switch (id) { case GD_NEAREST_NEIGHBOUR: case GD_WEIGHTED4: im->interpolation = NULL; break; /* generic versions*/ /* GD_BILINEAR_FIXED and GD_BICUBIC_FIXED are kept for BC reasons */ case GD_BILINEAR_FIXED: case GD_LINEAR: im->interpolation = filter_linear; break; case GD_BELL: im->interpolation = filter_bell; break; case GD_BESSEL: im->interpolation = filter_bessel; break; case GD_BICUBIC_FIXED: case GD_BICUBIC: im->interpolation = filter_bicubic; break; case GD_BLACKMAN: im->interpolation = filter_blackman; break; case GD_BOX: im->interpolation = filter_box; break; case GD_BSPLINE: im->interpolation = filter_bspline; break; case GD_CATMULLROM: im->interpolation = filter_catmullrom; break; case GD_GAUSSIAN: im->interpolation = filter_gaussian; break; case GD_GENERALIZED_CUBIC: im->interpolation = filter_generalized_cubic; break; case GD_HERMITE: im->interpolation = filter_hermite; break; case GD_HAMMING: im->interpolation = filter_hamming; break; case GD_HANNING: im->interpolation = filter_hanning; break; case GD_MITCHELL: im->interpolation = filter_mitchell; break; case GD_POWER: im->interpolation = filter_power; break; case GD_QUADRATIC: im->interpolation = filter_quadratic; break; case GD_SINC: im->interpolation = filter_sinc; break; case GD_TRIANGLE: im->interpolation = filter_triangle; break; case GD_DEFAULT: id = GD_LINEAR; im->interpolation = filter_linear; 
default: return 0; break; } im->interpolation_id = id; return 1; } /** * Function: gdImageGetInterpolationMethod * * Get the current interpolation method * * This is here so that the value can be read via a language or VM with an FFI * but no (portable) way to extract the value from the struct. * * Parameters: * im - The image. * * Returns: * The current interpolation method. * * See also: * - <gdInterpolationMethod> * - <gdImageSetInterpolationMethod> */ BGD_DECLARE(gdInterpolationMethod) gdImageGetInterpolationMethod(gdImagePtr im) { return im->interpolation_id; } #ifdef _MSC_VER # pragma optimize("", on) #endif
./CrossVul/dataset_final_sorted/CWE-191/c/bad_4828_0
crossvul-cpp_data_bad_247_0
/** * @file * IMAP helper functions * * @authors * Copyright (C) 1996-1998,2010,2012-2013 Michael R. Elkins <me@mutt.org> * Copyright (C) 1996-1999 Brandon Long <blong@fiction.net> * Copyright (C) 1999-2009,2012 Brendan Cully <brendan@kublai.com> * * @copyright * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation, either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @page imap_util IMAP helper functions * * IMAP helper functions */ #include "config.h" #include <ctype.h> #include <errno.h> #include <netdb.h> #include <netinet/in.h> #include <signal.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include "imap_private.h" #include "mutt/mutt.h" #include "conn/conn.h" #include "bcache.h" #include "context.h" #include "globals.h" #include "header.h" #include "imap/imap.h" #include "mailbox.h" #include "message.h" #include "mutt_account.h" #include "mutt_socket.h" #include "mx.h" #include "options.h" #include "protos.h" #include "url.h" #ifdef USE_HCACHE #include "hcache/hcache.h" #endif /** * imap_expand_path - Canonicalise an IMAP path * @param path Buffer containing path * @param len Buffer length * @retval 0 Success * @retval -1 Error * * IMAP implementation of mutt_expand_path. Rewrite an IMAP path in canonical * and absolute form. The buffer is rewritten in place with the canonical IMAP * path. 
* * Function can fail if imap_parse_path() or url_tostring() fail, * of if the buffer isn't large enough. */ int imap_expand_path(char *path, size_t len) { struct ImapMbox mx; struct ImapData *idata = NULL; struct Url url; char fixedpath[LONG_STRING]; int rc; if (imap_parse_path(path, &mx) < 0) return -1; idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW); mutt_account_tourl(&mx.account, &url); imap_fix_path(idata, mx.mbox, fixedpath, sizeof(fixedpath)); url.path = fixedpath; rc = url_tostring(&url, path, len, U_DECODE_PASSWD); FREE(&mx.mbox); return rc; } /** * imap_get_parent - Get an IMAP folder's parent * @param output Buffer for the result * @param mbox Mailbox whose parent is to be determined * @param olen Length of the buffer * @param delim Path delimiter */ void imap_get_parent(char *output, const char *mbox, size_t olen, char delim) { int n; /* Make a copy of the mailbox name, but only if the pointers are different */ if (mbox != output) mutt_str_strfcpy(output, mbox, olen); n = mutt_str_strlen(output); /* Let's go backwards until the next delimiter * * If output[n] is a '/', the first n-- will allow us * to ignore it. If it isn't, then output looks like * "/aaaaa/bbbb". There is at least one "b", so we can't skip * the "/" after the 'a's. * * If output == '/', then n-- => n == 0, so the loop ends * immediately */ for (n--; n >= 0 && output[n] != delim; n--) ; /* We stopped before the beginning. There is a trailing * slash. */ if (n > 0) { /* Strip the trailing delimiter. */ output[n] = '\0'; } else { output[0] = (n == 0) ? delim : '\0'; } } /** * imap_get_parent_path - Get the path of the parent folder * @param output Buffer for the result * @param path Mailbox whose parent is to be determined * @param olen Length of the buffer * * Provided an imap path, returns in output the parent directory if * existent. Else returns the same path. 
*/
void imap_get_parent_path(char *output, const char *path, size_t olen)
{
  struct ImapMbox mx;
  struct ImapData *idata = NULL;
  char mbox[LONG_STRING] = "";

  if (imap_parse_path(path, &mx) < 0)
  {
    /* not a valid IMAP path: hand back the input unchanged */
    mutt_str_strfcpy(output, path, olen);
    return;
  }

  idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW);
  if (!idata)
  {
    /* no connection means no delimiter information: return input */
    mutt_str_strfcpy(output, path, olen);
    return;
  }

  /* Stores a fixed path in mbox */
  imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox));

  /* Gets the parent mbox in mbox */
  imap_get_parent(mbox, mbox, sizeof(mbox), idata->delim);

  /* Returns a fully qualified IMAP url */
  imap_qualify_path(output, olen, &mx, mbox);
  FREE(&mx.mbox);
}

/**
 * imap_clean_path - Cleans an IMAP path using imap_fix_path
 * @param path Path to be cleaned
 * @param plen Length of the buffer
 *
 * Does it in place.
 */
void imap_clean_path(char *path, size_t plen)
{
  struct ImapMbox mx;
  struct ImapData *idata = NULL;
  char mbox[LONG_STRING] = "";

  if (imap_parse_path(path, &mx) < 0)
    return;

  idata = imap_conn_find(&mx.account, MUTT_IMAP_CONN_NONEW);
  if (!idata)
    return;

  /* Stores a fixed path in mbox */
  imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox));

  /* Returns a fully qualified IMAP url */
  imap_qualify_path(path, plen, &mx, mbox);
}

#ifdef USE_HCACHE
/**
 * imap_hcache_namer - Generate a filename for the header cache
 * @param path Path for the header cache file
 * @param dest Buffer for result
 * @param dlen Length of buffer
 * @retval num Chars written to dest
 */
static int imap_hcache_namer(const char *path, char *dest, size_t dlen)
{
  return snprintf(dest, dlen, "%s.hcache", path);
}

/**
 * imap_hcache_open - Open a header cache
 * @param idata Server data
 * @param path  Path to the header cache
 * @retval ptr  HeaderCache
 * @retval NULL Failure
 */
header_cache_t *imap_hcache_open(struct ImapData *idata, const char *path)
{
  struct ImapMbox mx;
  struct Url url;
  char cachepath[PATH_MAX];
  char mbox[PATH_MAX];

  if (path)
    imap_cachepath(idata, path, mbox, sizeof(mbox));
  else
  {
    /* no explicit path: derive the mailbox from the open context */
    if (!idata->ctx || imap_parse_path(idata->ctx->path, &mx) < 0)
      return NULL;

    imap_cachepath(idata, mx.mbox, mbox, sizeof(mbox));
    FREE(&mx.mbox);
  }

  /* reject mailbox names containing ".." components so a hostile server
   * cannot steer the cache file outside the cache directory */
  if (strstr(mbox, "/../") || (strcmp(mbox, "..") == 0) || (strncmp(mbox, "../", 3) == 0))
    return NULL;
  size_t len = strlen(mbox);
  if ((len > 3) && (strcmp(mbox + len - 3, "/..") == 0))
    return NULL;

  mutt_account_tourl(&idata->conn->account, &url);
  url.path = mbox;
  url_tostring(&url, cachepath, sizeof(cachepath), U_PATH);

  return mutt_hcache_open(HeaderCache, cachepath, imap_hcache_namer);
}

/**
 * imap_hcache_close - Close the header cache
 * @param idata Server data
 */
void imap_hcache_close(struct ImapData *idata)
{
  if (!idata->hcache)
    return;

  mutt_hcache_close(idata->hcache);
  idata->hcache = NULL;
}

/**
 * imap_hcache_get - Get a header cache entry by its UID
 * @param idata Server data
 * @param uid   UID to find
 * @retval ptr  Email Header
 * @retval NULL Failure
 */
struct Header *imap_hcache_get(struct ImapData *idata, unsigned int uid)
{
  char key[16];
  void *uv = NULL;
  struct Header *h = NULL;

  if (!idata->hcache)
    return NULL;

  /* key is "/<uid>"; an unsigned int fits well within key[16] */
  sprintf(key, "/%u", uid);
  uv = mutt_hcache_fetch(idata->hcache, key, imap_hcache_keylen(key));
  if (uv)
  {
    /* the cached record starts with the UIDVALIDITY it was stored under;
     * a mismatch means the server renumbered and the entry is stale */
    if (*(unsigned int *) uv == idata->uid_validity)
      h = mutt_hcache_restore(uv);
    else
      mutt_debug(3, "hcache uidvalidity mismatch: %u\n", *(unsigned int *) uv);
    mutt_hcache_free(idata->hcache, &uv);
  }

  return h;
}

/**
 * imap_hcache_put - Add an entry to the header cache
 * @param idata Server data
 * @param h     Email Header
 * @retval  0 Success
 * @retval -1 Failure
 */
int imap_hcache_put(struct ImapData *idata, struct Header *h)
{
  char key[16];

  if (!idata->hcache)
    return -1;

  sprintf(key, "/%u", HEADER_DATA(h)->uid);
  return mutt_hcache_store(idata->hcache, key, imap_hcache_keylen(key), h,
                           idata->uid_validity);
}

/**
 * imap_hcache_del - Delete an item from the header cache
 * @param idata Server data
 * @param uid   UID of entry to delete
 * @retval  0 Success
 * @retval -1 Failure
 */
int imap_hcache_del(struct ImapData *idata, unsigned int
uid)
{
  char key[16];

  if (!idata->hcache)
    return -1;

  sprintf(key, "/%u", uid);
  return mutt_hcache_delete(idata->hcache, key, imap_hcache_keylen(key));
}
#endif

/**
 * imap_parse_path - Parse an IMAP mailbox name into name,host,port
 * @param path Mailbox path to parse
 * @param mx   An IMAP mailbox
 * @retval  0 Success
 * @retval -1 Failure
 *
 * Given an IMAP mailbox name, return host, port and a path IMAP servers will
 * recognize.  mx.mbox is malloc'd, caller must free it
 */
int imap_parse_path(const char *path, struct ImapMbox *mx)
{
  /* default ports are resolved once and cached for the process lifetime */
  static unsigned short ImapPort = 0;
  static unsigned short ImapsPort = 0;
  struct servent *service = NULL;
  struct Url url;
  char *c = NULL;

  if (!ImapPort)
  {
    service = getservbyname("imap", "tcp");
    if (service)
      ImapPort = ntohs(service->s_port);
    else
      ImapPort = IMAP_PORT;
    mutt_debug(3, "Using default IMAP port %d\n", ImapPort);
  }
  if (!ImapsPort)
  {
    service = getservbyname("imaps", "tcp");
    if (service)
      ImapsPort = ntohs(service->s_port);
    else
      ImapsPort = IMAP_SSL_PORT;
    mutt_debug(3, "Using default IMAPS port %d\n", ImapsPort);
  }

  /* Defaults */
  memset(&mx->account, 0, sizeof(mx->account));
  mx->account.port = ImapPort;
  mx->account.type = MUTT_ACCT_TYPE_IMAP;

  c = mutt_str_strdup(path);
  url_parse(&url, c);
  if (url.scheme == U_IMAP || url.scheme == U_IMAPS)
  {
    /* modern imap[s]:// URL form */
    if (mutt_account_fromurl(&mx->account, &url) < 0 || !*mx->account.host)
    {
      url_free(&url);
      FREE(&c);
      return -1;
    }

    mx->mbox = mutt_str_strdup(url.path);

    if (url.scheme == U_IMAPS)
      mx->account.flags |= MUTT_ACCT_SSL;

    url_free(&url);
    FREE(&c);
  }
  /* old PINE-compatibility code */
  else
  {
    url_free(&url);
    FREE(&c);
    /* legacy "{user@host:port/ssl}mailbox" syntax */
    char tmp[128];
    if (sscanf(path, "{%127[^}]}", tmp) != 1)
      return -1;

    c = strchr(path, '}');
    if (!c)
      return -1;
    else
    {
      /* walk past closing '}' */
      mx->mbox = mutt_str_strdup(c + 1);
    }

    c = strrchr(tmp, '@');
    if (c)
    {
      *c = '\0';
      mutt_str_strfcpy(mx->account.user, tmp, sizeof(mx->account.user));
      mutt_str_strfcpy(tmp, c + 1, sizeof(tmp));
      mx->account.flags |= MUTT_ACCT_USER;
    }

    const int n = sscanf(tmp, "%127[^:/]%127s", mx->account.host, tmp);
    if (n < 1)
    {
      mutt_debug(1, "NULL host in %s\n", path);
      FREE(&mx->mbox);
      return -1;
    }

    if (n > 1)
    {
      /* parse optional ":port" and "/ssl" suffixes.
       * NOTE(review): sscanf reads from and writes to the same tmp buffer
       * here — overlapping source/destination; verify against upstream. */
      if (sscanf(tmp, ":%hu%127s", &(mx->account.port), tmp) >= 1)
        mx->account.flags |= MUTT_ACCT_PORT;
      if (sscanf(tmp, "/%s", tmp) == 1)
      {
        if (mutt_str_strncmp(tmp, "ssl", 3) == 0)
          mx->account.flags |= MUTT_ACCT_SSL;
        else
        {
          mutt_debug(1, "Unknown connection type in %s\n", path);
          FREE(&mx->mbox);
          return -1;
        }
      }
    }
  }

  /* SSL implies the IMAPS port unless one was given explicitly */
  if ((mx->account.flags & MUTT_ACCT_SSL) && !(mx->account.flags & MUTT_ACCT_PORT))
    mx->account.port = ImapsPort;

  return 0;
}

/**
 * imap_mxcmp - Compare mailbox names, giving priority to INBOX
 * @param mx1 First mailbox name
 * @param mx2 Second mailbox name
 * @retval <0 First mailbox precedes Second mailbox
 * @retval  0 Mailboxes are the same
 * @retval >0 Second mailbox precedes First mailbox
 *
 * Like a normal sort function except that "INBOX" will be sorted to the
 * beginning of the list.
 */
int imap_mxcmp(const char *mx1, const char *mx2)
{
  char *b1 = NULL;
  char *b2 = NULL;
  int rc;

  /* NULL or empty names compare as INBOX */
  if (!mx1 || !*mx1)
    mx1 = "INBOX";
  if (!mx2 || !*mx2)
    mx2 = "INBOX";
  if ((mutt_str_strcasecmp(mx1, "INBOX") == 0) &&
      (mutt_str_strcasecmp(mx2, "INBOX") == 0))
  {
    return 0;
  }

  b1 = mutt_mem_malloc(strlen(mx1) + 1);
  b2 = mutt_mem_malloc(strlen(mx2) + 1);

  /* normalise both names before comparing */
  imap_fix_path(NULL, mx1, b1, strlen(mx1) + 1);
  imap_fix_path(NULL, mx2, b2, strlen(mx2) + 1);

  rc = mutt_str_strcmp(b1, b2);
  FREE(&b1);
  FREE(&b2);

  return rc;
}

/**
 * imap_pretty_mailbox - Prettify an IMAP mailbox name
 * @param path Mailbox name to be tidied
 *
 * Called by mutt_pretty_mailbox() to make IMAP paths look nice.
*/ void imap_pretty_mailbox(char *path) { struct ImapMbox home, target; struct Url url; char *delim = NULL; int tlen; int hlen = 0; bool home_match = false; if (imap_parse_path(path, &target) < 0) return; tlen = mutt_str_strlen(target.mbox); /* check whether we can do '=' substitution */ if (mx_is_imap(Folder) && !imap_parse_path(Folder, &home)) { hlen = mutt_str_strlen(home.mbox); if (tlen && mutt_account_match(&home.account, &target.account) && (mutt_str_strncmp(home.mbox, target.mbox, hlen) == 0)) { if (hlen == 0) home_match = true; else if (ImapDelimChars) { for (delim = ImapDelimChars; *delim != '\0'; delim++) if (target.mbox[hlen] == *delim) home_match = true; } } FREE(&home.mbox); } /* do the '=' substitution */ if (home_match) { *path++ = '='; /* copy remaining path, skipping delimiter */ if (hlen == 0) hlen = -1; memcpy(path, target.mbox + hlen + 1, tlen - hlen - 1); path[tlen - hlen - 1] = '\0'; } else { mutt_account_tourl(&target.account, &url); url.path = target.mbox; /* FIXME: That hard-coded constant is bogus. But we need the actual * size of the buffer from mutt_pretty_mailbox. And these pretty * operations usually shrink the result. Still... 
*/ url_tostring(&url, path, 1024, 0); } FREE(&target.mbox); } /** * imap_continue - display a message and ask the user if they want to go on * @param msg Location of the error * @param resp Message for user * @retval num Result: #MUTT_YES, #MUTT_NO, #MUTT_ABORT */ int imap_continue(const char *msg, const char *resp) { imap_error(msg, resp); return mutt_yesorno(_("Continue?"), 0); } /** * imap_error - show an error and abort * @param where Location of the error * @param msg Message for user */ void imap_error(const char *where, const char *msg) { mutt_error("%s [%s]\n", where, msg); } /** * imap_new_idata - Allocate and initialise a new ImapData structure * @retval NULL Failure (no mem) * @retval ptr New ImapData */ struct ImapData *imap_new_idata(void) { struct ImapData *idata = mutt_mem_calloc(1, sizeof(struct ImapData)); idata->cmdbuf = mutt_buffer_new(); idata->cmdslots = ImapPipelineDepth + 2; idata->cmds = mutt_mem_calloc(idata->cmdslots, sizeof(*idata->cmds)); STAILQ_INIT(&idata->flags); STAILQ_INIT(&idata->mboxcache); return idata; } /** * imap_free_idata - Release and clear storage in an ImapData structure * @param idata Server data */ void imap_free_idata(struct ImapData **idata) { if (!idata) return; FREE(&(*idata)->capstr); mutt_list_free(&(*idata)->flags); imap_mboxcache_free(*idata); mutt_buffer_free(&(*idata)->cmdbuf); FREE(&(*idata)->buf); mutt_bcache_close(&(*idata)->bcache); FREE(&(*idata)->cmds); FREE(idata); } /** * imap_fix_path - Fix up the imap path * @param idata Server data * @param mailbox Mailbox path * @param path Buffer for the result * @param plen Length of buffer * @retval ptr Fixed-up path * * This is necessary because the rest of neomutt assumes a hierarchy delimiter of * '/', which is not necessarily true in IMAP. Additionally, the filesystem * converts multiple hierarchy delimiters into a single one, ie "///" is equal * to "/". IMAP servers are not required to do this. 
* Moreover, IMAP servers may dislike the path ending with the delimiter. */ char *imap_fix_path(struct ImapData *idata, const char *mailbox, char *path, size_t plen) { int i = 0; char delim = '\0'; if (idata) delim = idata->delim; while (mailbox && *mailbox && i < plen - 1) { if ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim)) { /* use connection delimiter if known. Otherwise use user delimiter */ if (!idata) delim = *mailbox; while (*mailbox && ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim))) { mailbox++; } path[i] = delim; } else { path[i] = *mailbox; mailbox++; } i++; } if (i && path[--i] != delim) i++; path[i] = '\0'; return path; } /** * imap_cachepath - Generate a cache path for a mailbox * @param idata Server data * @param mailbox Mailbox name * @param dest Buffer to store cache path * @param dlen Length of buffer */ void imap_cachepath(struct ImapData *idata, const char *mailbox, char *dest, size_t dlen) { char *s = NULL; const char *p = mailbox; for (s = dest; p && *p && dlen; dlen--) { if (*p == idata->delim) { *s = '/'; /* simple way to avoid collisions with UIDs */ if (*(p + 1) >= '0' && *(p + 1) <= '9') { if (--dlen) *++s = '_'; } } else *s = *p; p++; s++; } *s = '\0'; } /** * imap_get_literal_count - write number of bytes in an IMAP literal into bytes * @param[in] buf Number as a string * @param[out] bytes Resulting number * @retval 0 Success * @retval -1 Failure */ int imap_get_literal_count(const char *buf, unsigned int *bytes) { char *pc = NULL; char *pn = NULL; if (!buf || !(pc = strchr(buf, '{'))) return -1; pc++; pn = pc; while (isdigit((unsigned char) *pc)) pc++; *pc = '\0'; if (mutt_str_atoui(pn, bytes) < 0) return -1; return 0; } /** * imap_get_qualifier - Get the qualifier from a tagged response * @param buf Command string to process * @retval ptr Start of the qualifier * * In a tagged response, skip tag and status for the qualifier message. 
* Used by imap_copy_message for TRYCREATE */ char *imap_get_qualifier(char *buf) { char *s = buf; /* skip tag */ s = imap_next_word(s); /* skip OK/NO/BAD response */ s = imap_next_word(s); return s; } /** * imap_next_word - Find where the next IMAP word begins * @param s Command string to process * @retval ptr Next IMAP word */ char *imap_next_word(char *s) { int quoted = 0; while (*s) { if (*s == '\\') { s++; if (*s) s++; continue; } if (*s == '\"') quoted = quoted ? 0 : 1; if (!quoted && ISSPACE(*s)) break; s++; } SKIPWS(s); return s; } /** * imap_qualify_path - Make an absolute IMAP folder target * @param dest Buffer for the result * @param len Length of buffer * @param mx Imap mailbox * @param path Path relative to the mailbox * * given ImapMbox and relative path. */ void imap_qualify_path(char *dest, size_t len, struct ImapMbox *mx, char *path) { struct Url url; mutt_account_tourl(&mx->account, &url); url.path = path; url_tostring(&url, dest, len, 0); } /** * imap_quote_string - quote string according to IMAP rules * @param dest Buffer for the result * @param dlen Length of the buffer * @param src String to be quoted * * Surround string with quotes, escape " and \ with backslash */ void imap_quote_string(char *dest, size_t dlen, const char *src, bool quote_backtick) { const char *quote = "`\"\\"; if (!quote_backtick) quote++; char *pt = dest; const char *s = src; *pt++ = '"'; /* save room for trailing quote-char */ dlen -= 2; for (; *s && dlen; s++) { if (strchr(quote, *s)) { dlen -= 2; if (dlen == 0) break; *pt++ = '\\'; *pt++ = *s; } else { *pt++ = *s; dlen--; } } *pt++ = '"'; *pt = '\0'; } /** * imap_unquote_string - equally stupid unquoting routine * @param s String to be unquoted */ void imap_unquote_string(char *s) { char *d = s; if (*s == '\"') s++; else return; while (*s) { if (*s == '\"') { *d = '\0'; return; } if (*s == '\\') { s++; } if (*s) { *d = *s; d++; s++; } } *d = '\0'; } /** * imap_munge_mbox_name - Quote awkward characters in a mailbox name 
* @param idata Server data * @param dest Buffer to store safe mailbox name * @param dlen Length of buffer * @param src Mailbox name */ void imap_munge_mbox_name(struct ImapData *idata, char *dest, size_t dlen, const char *src) { char *buf = mutt_str_strdup(src); imap_utf_encode(idata, &buf); imap_quote_string(dest, dlen, buf, false); FREE(&buf); } /** * imap_unmunge_mbox_name - Remove quoting from a mailbox name * @param idata Server data * @param s Mailbox name * * The string will be altered in-place. */ void imap_unmunge_mbox_name(struct ImapData *idata, char *s) { imap_unquote_string(s); char *buf = mutt_str_strdup(s); if (buf) { imap_utf_decode(idata, &buf); strncpy(s, buf, strlen(s)); } FREE(&buf); } /** * imap_keepalive - poll the current folder to keep the connection alive */ void imap_keepalive(void) { struct Connection *conn = NULL; struct ImapData *idata = NULL; time_t now = time(NULL); TAILQ_FOREACH(conn, mutt_socket_head(), entries) { if (conn->account.type == MUTT_ACCT_TYPE_IMAP) { idata = conn->data; if (idata->state >= IMAP_AUTHENTICATED && now >= idata->lastread + ImapKeepalive) { imap_check(idata, 1); } } } } /** * imap_wait_keepalive - Wait for a process to change state * @param pid Process ID to listen to * @retval num 'wstatus' from waitpid() */ int imap_wait_keepalive(pid_t pid) { struct sigaction oldalrm; struct sigaction act; sigset_t oldmask; int rc; bool imap_passive = ImapPassive; ImapPassive = true; OptKeepQuiet = true; sigprocmask(SIG_SETMASK, NULL, &oldmask); sigemptyset(&act.sa_mask); act.sa_handler = mutt_sig_empty_handler; #ifdef SA_INTERRUPT act.sa_flags = SA_INTERRUPT; #else act.sa_flags = 0; #endif sigaction(SIGALRM, &act, &oldalrm); alarm(ImapKeepalive); while (waitpid(pid, &rc, 0) < 0 && errno == EINTR) { alarm(0); /* cancel a possibly pending alarm */ imap_keepalive(); alarm(ImapKeepalive); } alarm(0); /* cancel a possibly pending alarm */ sigaction(SIGALRM, &oldalrm, NULL); sigprocmask(SIG_SETMASK, &oldmask, NULL); 
OptKeepQuiet = false; if (!imap_passive) ImapPassive = false; return rc; } /** * imap_allow_reopen - Allow re-opening a folder upon expunge * @param ctx Context */ void imap_allow_reopen(struct Context *ctx) { struct ImapData *idata = NULL; if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP) return; idata = ctx->data; if (idata->ctx == ctx) idata->reopen |= IMAP_REOPEN_ALLOW; } /** * imap_disallow_reopen - Disallow re-opening a folder upon expunge * @param ctx Context */ void imap_disallow_reopen(struct Context *ctx) { struct ImapData *idata = NULL; if (!ctx || !ctx->data || ctx->magic != MUTT_IMAP) return; idata = ctx->data; if (idata->ctx == ctx) idata->reopen &= ~IMAP_REOPEN_ALLOW; } /** * imap_account_match - Compare two Accounts * @param a1 First Account * @param a2 Second Account * @retval true Accounts match */ int imap_account_match(const struct Account *a1, const struct Account *a2) { struct ImapData *a1_idata = imap_conn_find(a1, MUTT_IMAP_CONN_NONEW); struct ImapData *a2_idata = imap_conn_find(a2, MUTT_IMAP_CONN_NONEW); const struct Account *a1_canon = a1_idata == NULL ? a1 : &a1_idata->conn->account; const struct Account *a2_canon = a2_idata == NULL ? a2 : &a2_idata->conn->account; return mutt_account_match(a1_canon, a2_canon); }
./CrossVul/dataset_final_sorted/CWE-191/c/bad_247_0
crossvul-cpp_data_good_4272_0
/*
** $Id: ldebug.c $
** Debug Interface
** See Copyright Notice in lua.h
*/

#define ldebug_c
#define LUA_CORE

#include "lprefix.h"

#include <stdarg.h>
#include <stddef.h>
#include <string.h>

#include "lua.h"

#include "lapi.h"
#include "lcode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"


/* true for a C function/closure (no Lua debug info available) */
#define noLuaClosure(f)  ((f) == NULL || (f)->c.tt == LUA_VCCL)

/* inverse of 'pcRel' */
#define invpcRel(pc, p)  ((p)->code + (pc) + 1)

static const char *funcnamefromcode (lua_State *L, CallInfo *ci,
                                     const char **name);


/* index of the instruction currently executing in the Lua frame 'ci' */
static int currentpc (CallInfo *ci) {
  lua_assert(isLua(ci));
  return pcRel(ci->u.l.savedpc, ci_func(ci)->p);
}


/*
** Get a "base line" to find the line corresponding to an instruction.
** For that, search the array of absolute line info for the largest saved
** instruction smaller or equal to the wanted instruction. A special
** case is when there is no absolute info or the instruction is before
** the first absolute one.
*/
static int getbaseline (const Proto *f, int pc, int *basepc) {
  if (f->sizeabslineinfo == 0 || pc < f->abslineinfo[0].pc) {
    *basepc = -1;  /* start from the beginning */
    return f->linedefined;
  }
  else {
    unsigned int i;
    if (pc >= f->abslineinfo[f->sizeabslineinfo - 1].pc)
      i = f->sizeabslineinfo - 1;  /* instruction is after last saved one */
    else {  /* binary search */
      unsigned int j = f->sizeabslineinfo - 1;  /* pc < anchorlines[j] */
      i = 0;  /* abslineinfo[i] <= pc */
      while (i < j - 1) {
        unsigned int m = (j + i) / 2;
        if (pc >= f->abslineinfo[m].pc)
          i = m;
        else
          j = m;
      }
    }
    *basepc = f->abslineinfo[i].pc;
    return f->abslineinfo[i].line;
  }
}


/*
** Get the line corresponding to instruction 'pc' in function 'f';
** first gets a base line and from there does the increments until
** the desired instruction.
*/
int luaG_getfuncline (const Proto *f, int pc) {
  if (f->lineinfo == NULL)  /* no debug information? */
    return -1;
  else {
    int basepc;
    int baseline = getbaseline(f, pc, &basepc);
    while (basepc++ < pc) {  /* walk until given instruction */
      lua_assert(f->lineinfo[basepc] != ABSLINEINFO);
      baseline += f->lineinfo[basepc];  /* correct line */
    }
    return baseline;
  }
}


/* source line of the instruction currently executing in 'ci' */
static int getcurrentline (CallInfo *ci) {
  return luaG_getfuncline(ci_func(ci)->p, currentpc(ci));
}


/*
** Set 'trap' for all active Lua frames.
** This function can be called during a signal, under "reasonable"
** assumptions. A new 'ci' is completely linked in the list before it
** becomes part of the "active" list, and we assume that pointers are
** atomic; see comment in next function.
** (A compiler doing interprocedural optimizations could, theoretically,
** reorder memory writes in such a way that the list could be
** temporarily broken while inserting a new element. We simply assume it
** has no good reasons to do that.)
*/
static void settraps (CallInfo *ci) {
  for (; ci != NULL; ci = ci->previous)
    if (isLua(ci))
      ci->u.l.trap = 1;
}


/*
** This function can be called during a signal, under "reasonable"
** assumptions.
** Fields 'basehookcount' and 'hookcount' (set by 'resethookcount')
** are for debug only, and it is no problem if they get arbitrary
** values (causes at most one wrong hook call). 'hookmask' is an atomic
** value. We assume that pointers are atomic too (e.g., gcc ensures that
** for all platforms where it runs). Moreover, 'hook' is always checked
** before being called (see 'luaD_hook').
*/
LUA_API void lua_sethook (lua_State *L, lua_Hook func, int mask, int count) {
  if (func == NULL || mask == 0) {  /* turn off hooks? */
    mask = 0;
    func = NULL;
  }
  L->hook = func;
  L->basehookcount = count;
  resethookcount(L);
  L->hookmask = cast_byte(mask);
  if (mask)
    settraps(L->ci);  /* to trace inside 'luaV_execute' */
}


LUA_API lua_Hook lua_gethook (lua_State *L) {
  return L->hook;
}


LUA_API int lua_gethookmask (lua_State *L) {
  return L->hookmask;
}


LUA_API int lua_gethookcount (lua_State *L) {
  return L->basehookcount;
}


/* fill 'ar' with the activation record 'level' frames down the stack */
LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) {
  int status;
  CallInfo *ci;
  if (level < 0) return 0;  /* invalid (negative) level */
  lua_lock(L);
  for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous)
    level--;
  if (level == 0 && ci != &L->base_ci) {  /* level found? */
    status = 1;
    ar->i_ci = ci;
  }
  else status = 0;  /* no such level */
  lua_unlock(L);
  return status;
}


/* name of upvalue 'uv' of prototype 'p' ("?" when stripped) */
static const char *upvalname (const Proto *p, int uv) {
  TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name);
  if (s == NULL) return "?";
  else return getstr(s);
}


/* locate the 'n'-th extra vararg of frame 'ci' (n is negative) */
static const char *findvararg (CallInfo *ci, int n, StkId *pos) {
  if (clLvalue(s2v(ci->func))->p->is_vararg) {
    int nextra = ci->u.l.nextraargs;
    if (n >= -nextra) {  /* 'n' is negative */
      *pos = ci->func - nextra - (n + 1);
      return "(vararg)";  /* generic name for any vararg */
    }
  }
  return NULL;  /* no such vararg */
}


/* find the name and stack slot of local variable 'n' in frame 'ci' */
const char *luaG_findlocal (lua_State *L, CallInfo *ci, int n, StkId *pos) {
  StkId base = ci->func + 1;
  const char *name = NULL;
  if (isLua(ci)) {
    if (n < 0)  /* access to vararg values? */
      return findvararg(ci, n, pos);
    else
      name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci));
  }
  if (name == NULL) {  /* no 'standard' name? */
    StkId limit = (ci == L->ci) ? L->top : ci->next->func;
    if (limit - base >= n && n > 0) {  /* is 'n' inside 'ci' stack? */
      /* generic name for any valid slot */
      name = isLua(ci) ? "(temporary)" : "(C temporary)";
    }
    else
      return NULL;  /* no name */
  }
  if (pos)
    *pos = base + (n - 1);
  return name;
}


LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) {
  const char *name;
  lua_lock(L);
  if (ar == NULL) {  /* information about non-active function? */
    if (!isLfunction(s2v(L->top - 1)))  /* not a Lua function? */
      name = NULL;
    else  /* consider live variables at function start (parameters) */
      name = luaF_getlocalname(clLvalue(s2v(L->top - 1))->p, n, 0);
  }
  else {  /* active function; get information through 'ar' */
    StkId pos = NULL;  /* to avoid warnings */
    name = luaG_findlocal(L, ar->i_ci, n, &pos);
    if (name) {
      setobjs2s(L, L->top, pos);
      api_incr_top(L);
    }
  }
  lua_unlock(L);
  return name;
}


LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) {
  StkId pos = NULL;  /* to avoid warnings */
  const char *name;
  lua_lock(L);
  name = luaG_findlocal(L, ar->i_ci, n, &pos);
  if (name) {
    setobjs2s(L, pos, L->top - 1);
    L->top--;  /* pop value */
  }
  lua_unlock(L);
  return name;
}


/* fill the source/line fields of 'ar' for closure 'cl' */
static void funcinfo (lua_Debug *ar, Closure *cl) {
  if (noLuaClosure(cl)) {
    ar->source = "=[C]";
    ar->srclen = LL("=[C]");
    ar->linedefined = -1;
    ar->lastlinedefined = -1;
    ar->what = "C";
  }
  else {
    const Proto *p = cl->l.p;
    if (p->source) {
      ar->source = getstr(p->source);
      ar->srclen = tsslen(p->source);
    }
    else {
      ar->source = "=?";
      ar->srclen = LL("=?");
    }
    ar->linedefined = p->linedefined;
    ar->lastlinedefined = p->lastlinedefined;
    ar->what = (ar->linedefined == 0) ? "main" : "Lua";
  }
  luaO_chunkid(ar->short_src, ar->source, ar->srclen);
}


/* line of instruction 'pc', given the line of the previous instruction */
static int nextline (const Proto *p, int currentline, int pc) {
  if (p->lineinfo[pc] != ABSLINEINFO)
    return currentline + p->lineinfo[pc];
  else
    return luaG_getfuncline(p, pc);
}


/* push a table whose keys are the active source lines of 'f' */
static void collectvalidlines (lua_State *L, Closure *f) {
  if (noLuaClosure(f)) {
    setnilvalue(s2v(L->top));
    api_incr_top(L);
  }
  else {
    int i;
    TValue v;
    const Proto *p = f->l.p;
    int currentline = p->linedefined;
    Table *t = luaH_new(L);  /* new table to store active lines */
    sethvalue2s(L, L->top, t);  /* push it on stack */
    api_incr_top(L);
    setbtvalue(&v);  /* boolean 'true' to be the value of all indices */
    for (i = 0; i < p->sizelineinfo; i++) {  /* for all lines with code */
      currentline = nextline(p, currentline, i);
      luaH_setint(L, t, currentline, &v);  /* table[line] = true */
    }
  }
}


/* best-effort name for the function running in 'ci' */
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) {
  if (ci == NULL)  /* no 'ci'? */
    return NULL;  /* no info */
  else if (ci->callstatus & CIST_FIN) {  /* is this a finalizer? */
    *name = "__gc";
    return "metamethod";  /* report it as such */
  }
  /* calling function is a known Lua function? */
  else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous))
    return funcnamefromcode(L, ci->previous, name);
  else return NULL;  /* no way to find a name */
}


/* dispatch on each character of 'what' to fill the requested 'ar' fields */
static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar,
                       Closure *f, CallInfo *ci) {
  int status = 1;
  for (; *what; what++) {
    switch (*what) {
      case 'S': {
        funcinfo(ar, f);
        break;
      }
      case 'l': {
        ar->currentline = (ci && isLua(ci)) ? getcurrentline(ci) : -1;
        break;
      }
      case 'u': {
        ar->nups = (f == NULL) ? 0 : f->c.nupvalues;
        if (noLuaClosure(f)) {
          ar->isvararg = 1;
          ar->nparams = 0;
        }
        else {
          ar->isvararg = f->l.p->is_vararg;
          ar->nparams = f->l.p->numparams;
        }
        break;
      }
      case 't': {
        ar->istailcall = (ci) ? ci->callstatus & CIST_TAIL : 0;
        break;
      }
      case 'n': {
        ar->namewhat = getfuncname(L, ci, &ar->name);
        if (ar->namewhat == NULL) {
          ar->namewhat = "";  /* not found */
          ar->name = NULL;
        }
        break;
      }
      case 'r': {
        if (ci == NULL || !(ci->callstatus & CIST_TRAN))
          ar->ftransfer = ar->ntransfer = 0;
        else {
          ar->ftransfer = ci->u2.transferinfo.ftransfer;
          ar->ntransfer = ci->u2.transferinfo.ntransfer;
        }
        break;
      }
      case 'L':
      case 'f':  /* handled by lua_getinfo */
        break;
      default: status = 0;  /* invalid option */
    }
  }
  return status;
}


LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) {
  int status;
  Closure *cl;
  CallInfo *ci;
  TValue *func;
  lua_lock(L);
  if (*what == '>') {
    ci = NULL;
    func = s2v(L->top - 1);
    api_check(L, ttisfunction(func), "function expected");
    what++;  /* skip the '>' */
    L->top--;  /* pop function */
  }
  else {
    ci = ar->i_ci;
    func = s2v(ci->func);
    lua_assert(ttisfunction(func));
  }
  cl = ttisclosure(func) ? clvalue(func) : NULL;
  status = auxgetinfo(L, what, ar, cl, ci);
  if (strchr(what, 'f')) {
    setobj2s(L, L->top, func);
    api_incr_top(L);
  }
  if (strchr(what, 'L'))
    collectvalidlines(L, cl);
  lua_unlock(L);
  return status;
}


/*
** {======================================================
** Symbolic Execution
** =======================================================
*/

static const char *getobjname (const Proto *p, int lastpc, int reg,
                               const char **name);


/*
** Find a "name" for the constant 'c'.
*/
static void kname (const Proto *p, int c, const char **name) {
  TValue *kvalue = &p->k[c];
  *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?";
}


/*
** Find a "name" for the register 'c'.
*/
static void rname (const Proto *p, int pc, int c, const char **name) {
  const char *what = getobjname(p, pc, c, name);  /* search for 'c' */
  if (!(what && *what == 'c'))  /* did not find a constant name? */
    *name = "?";
}


/*
** Find a "name" for a 'C' value in an RK instruction.
*/
static void rkname (const Proto *p, int pc, Instruction i, const char **name) {
  int c = GETARG_C(i);  /* key index */
  if (GETARG_k(i))  /* is 'c' a constant? */
    kname(p, c, name);
  else  /* 'c' is a register */
    rname(p, pc, c, name);
}


/* discard 'pc' when it is inside a conditional branch */
static int filterpc (int pc, int jmptarget) {
  if (pc < jmptarget)  /* is code conditional (inside a jump)? */
    return -1;  /* cannot know who sets that register */
  else return pc;  /* current position sets that register */
}


/*
** Try to find last instruction before 'lastpc' that modified register 'reg'.
*/
static int findsetreg (const Proto *p, int lastpc, int reg) {
  int pc;
  int setreg = -1;  /* keep last instruction that changed 'reg' */
  int jmptarget = 0;  /* any code before this address is conditional */
  if (testMMMode(GET_OPCODE(p->code[lastpc])))
    lastpc--;  /* previous instruction was not actually executed */
  for (pc = 0; pc < lastpc; pc++) {
    Instruction i = p->code[pc];
    OpCode op = GET_OPCODE(i);
    int a = GETARG_A(i);
    int change;  /* true if current instruction changed 'reg' */
    switch (op) {
      case OP_LOADNIL: {  /* set registers from 'a' to 'a+b' */
        int b = GETARG_B(i);
        change = (a <= reg && reg <= a + b);
        break;
      }
      case OP_TFORCALL: {  /* affect all regs above its base */
        change = (reg >= a + 2);
        break;
      }
      case OP_CALL:
      case OP_TAILCALL: {  /* affect all registers above base */
        change = (reg >= a);
        break;
      }
      case OP_JMP: {  /* doesn't change registers, but changes 'jmptarget' */
        int b = GETARG_sJ(i);
        int dest = pc + 1 + b;
        /* jump does not skip 'lastpc' and is larger than current one? */
        if (dest <= lastpc && dest > jmptarget)
          jmptarget = dest;  /* update 'jmptarget' */
        change = 0;
        break;
      }
      default:  /* any instruction that sets A */
        change = (testAMode(op) && reg == a);
        break;
    }
    if (change)
      setreg = filterpc(pc, jmptarget);
  }
  return setreg;
}


/*
** Check whether table being indexed by instruction 'i' is the
** environment '_ENV'
*/
static const char *gxf (const Proto *p, int pc, Instruction i, int isup) {
  int t = GETARG_B(i);  /* table index */
  const char *name;  /* name of indexed variable */
  if (isup)  /* is an upvalue? */
    name = upvalname(p, t);
  else
    getobjname(p, pc, t, &name);
  return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field";
}


/* symbolic execution: find a name and kind for the value in 'reg' */
static const char *getobjname (const Proto *p, int lastpc, int reg,
                               const char **name) {
  int pc;
  *name = luaF_getlocalname(p, reg + 1, lastpc);
  if (*name)  /* is a local? */
    return "local";
  /* else try symbolic execution */
  pc = findsetreg(p, lastpc, reg);
  if (pc != -1) {  /* could find instruction? */
    Instruction i = p->code[pc];
    OpCode op = GET_OPCODE(i);
    switch (op) {
      case OP_MOVE: {
        int b = GETARG_B(i);  /* move from 'b' to 'a' */
        if (b < GETARG_A(i))
          return getobjname(p, pc, b, name);  /* get name for 'b' */
        break;
      }
      case OP_GETTABUP: {
        int k = GETARG_C(i);  /* key index */
        kname(p, k, name);
        return gxf(p, pc, i, 1);
      }
      case OP_GETTABLE: {
        int k = GETARG_C(i);  /* key index */
        rname(p, pc, k, name);
        return gxf(p, pc, i, 0);
      }
      case OP_GETI: {
        *name = "integer index";
        return "field";
      }
      case OP_GETFIELD: {
        int k = GETARG_C(i);  /* key index */
        kname(p, k, name);
        return gxf(p, pc, i, 0);
      }
      case OP_GETUPVAL: {
        *name = upvalname(p, GETARG_B(i));
        return "upvalue";
      }
      case OP_LOADK:
      case OP_LOADKX: {
        int b = (op == OP_LOADK) ? GETARG_Bx(i)
                                 : GETARG_Ax(p->code[pc + 1]);
        if (ttisstring(&p->k[b])) {
          *name = svalue(&p->k[b]);
          return "constant";
        }
        break;
      }
      case OP_SELF: {
        rkname(p, pc, i, name);
        return "method";
      }
      default: break;  /* go through to return NULL */
    }
  }
  return NULL;  /* could not find reasonable name */
}


/*
** Try to find a name for a function based on the code that called it.
** (Only works when function was called by a Lua function.)
** Returns what the name is (e.g., "for iterator", "method",
** "metamethod") and sets '*name' to point to the name.
*/
static const char *funcnamefromcode (lua_State *L, CallInfo *ci,
                                     const char **name) {
  TMS tm = (TMS)0;  /* (initial value avoids warnings) */
  const Proto *p = ci_func(ci)->p;  /* calling function */
  int pc = currentpc(ci);  /* calling instruction index */
  Instruction i = p->code[pc];  /* calling instruction */
  if (ci->callstatus & CIST_HOOKED) {  /* was it called inside a hook? */
    *name = "?";
    return "hook";
  }
  switch (GET_OPCODE(i)) {
    case OP_CALL:
    case OP_TAILCALL:
      return getobjname(p, pc, GETARG_A(i), name);  /* get function name */
    case OP_TFORCALL: {  /* for iterator */
      *name = "for iterator";
      return "for iterator";
    }
    /* other instructions can do calls through metamethods */
    case OP_SELF: case OP_GETTABUP: case OP_GETTABLE: case OP_GETI:
    case OP_GETFIELD:
      tm = TM_INDEX;
      break;
    case OP_SETTABUP: case OP_SETTABLE: case OP_SETI: case OP_SETFIELD:
      tm = TM_NEWINDEX;
      break;
    case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: {
      tm = cast(TMS, GETARG_C(i));
      break;
    }
    case OP_UNM: tm = TM_UNM; break;
    case OP_BNOT: tm = TM_BNOT; break;
    case OP_LEN: tm = TM_LEN; break;
    case OP_CONCAT: tm = TM_CONCAT; break;
    case OP_EQ: tm = TM_EQ; break;
    case OP_LT: case OP_LE: case OP_LTI: case OP_LEI:
      *name = "order";  /* '<=' can call '__lt', etc. */
      return "metamethod";
    case OP_CLOSE: case OP_RETURN:
      *name = "close";
      return "metamethod";
    default:
      return NULL;  /* cannot find a reasonable name */
  }
  *name = getstr(G(L)->tmname[tm]) + 2;
  return "metamethod";
}

/* }====================================================== */



/*
** The subtraction of two potentially unrelated pointers is
** not ISO C, but it should not crash a program; the subsequent
** checks are ISO C and ensure a correct result.
*/
static int isinstack (CallInfo *ci, const TValue *o) {
  StkId base = ci->func + 1;
  ptrdiff_t i = cast(StkId, o) - base;
  return (0 <= i && i < (ci->top - base) && s2v(base + i) == o);
}


/*
** Checks whether value 'o' came from an upvalue. (That can only happen
** with instructions OP_GETTABUP/OP_SETTABUP, which operate directly on
** upvalues.)
*/
static const char *getupvalname (CallInfo *ci, const TValue *o,
                                 const char **name) {
  LClosure *c = ci_func(ci);
  int i;
  for (i = 0; i < c->nupvalues; i++) {
    if (c->upvals[i]->v == o) {
      *name = upvalname(c->p, i);
      return "upvalue";
    }
  }
  return NULL;
}


/* format " (<kind> '<name>')" for 'o', or "" when nothing is known */
static const char *varinfo (lua_State *L, const TValue *o) {
  const char *name = NULL;  /* to avoid warnings */
  CallInfo *ci = L->ci;
  const char *kind = NULL;
  if (isLua(ci)) {
    kind = getupvalname(ci, o, &name);  /* check whether 'o' is an upvalue */
    if (!kind && isinstack(ci, o))  /* no? try a register */
      kind = getobjname(ci_func(ci)->p, currentpc(ci),
                        cast_int(cast(StkId, o) - (ci->func + 1)), &name);
  }
  return (kind) ? luaO_pushfstring(L, " (%s '%s')", kind, name) : "";
}


l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) {
  const char *t = luaT_objtypename(L, o);
  luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o));
}


l_noret luaG_forerror (lua_State *L, const TValue *o, const char *what) {
  luaG_runerror(L, "bad 'for' %s (number expected, got %s)",
                   what, luaT_objtypename(L, o));
}


l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) {
  if (ttisstring(p1) || cvt2str(p1)) p1 = p2;
  luaG_typeerror(L, p1, "concatenate");
}


l_noret luaG_opinterror (lua_State *L, const TValue *p1,
                         const TValue *p2, const char *msg) {
  if (!ttisnumber(p1))  /* first operand is wrong? */
    p2 = p1;  /* now second is wrong */
  luaG_typeerror(L, p2, msg);
}


/*
** Error when both values are convertible to numbers, but not to integers
*/
l_noret luaG_tointerror (lua_State *L, const TValue *p1, const TValue *p2) {
  lua_Integer temp;
  if (!tointegerns(p1, &temp))
    p2 = p1;
  luaG_runerror(L, "number%s has no integer representation", varinfo(L, p2));
}


l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) {
  const char *t1 = luaT_objtypename(L, p1);
  const char *t2 = luaT_objtypename(L, p2);
  if (strcmp(t1, t2) == 0)
    luaG_runerror(L, "attempt to compare two %s values", t1);
  else
    luaG_runerror(L, "attempt to compare %s with %s", t1, t2);
}


/* add src:line information to 'msg' */
const char *luaG_addinfo (lua_State *L, const char *msg, TString *src,
                          int line) {
  char buff[LUA_IDSIZE];
  if (src)
    luaO_chunkid(buff, getstr(src), tsslen(src));
  else {  /* no source available; use "?" instead */
    buff[0] = '?'; buff[1] = '\0';
  }
  return luaO_pushfstring(L, "%s:%d: %s", buff, line, msg);
}


l_noret luaG_errormsg (lua_State *L) {
  if (L->errfunc != 0) {  /* is there an error handling function? */
    StkId errfunc = restorestack(L, L->errfunc);
    lua_assert(ttisfunction(s2v(errfunc)));
    setobjs2s(L, L->top, L->top - 1);  /* move argument */
    setobjs2s(L, L->top - 1, errfunc);  /* push function */
    L->top++;  /* assume EXTRA_STACK */
    luaD_callnoyield(L, L->top - 2, 1);  /* call it */
  }
  luaD_throw(L, LUA_ERRRUN);
}


l_noret luaG_runerror (lua_State *L, const char *fmt, ...) {
  CallInfo *ci = L->ci;
  const char *msg;
  va_list argp;
  luaC_checkGC(L);  /* error message uses memory */
  va_start(argp, fmt);
  msg = luaO_pushvfstring(L, fmt, argp);  /* format message */
  va_end(argp);
  if (isLua(ci))  /* if Lua function, add source:line information */
    luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci));
  luaG_errormsg(L);
}


/*
** Check whether new instruction 'newpc' is in a different line from
** previous instruction 'oldpc'.
*/
static int changedline (const Proto *p, int oldpc, int newpc) {
  while (oldpc++ < newpc) {
    if (p->lineinfo[oldpc] != 0)
      return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc));
  }
  return 0;  /* no line changes in the way */
}


/*
** Traces the execution of a Lua function. Called before the execution
** of each opcode, when debug is on. 'L->oldpc' stores the last
** instruction traced, to detect line changes. When entering a new
** function, 'npci' will be zero and will test as a new line without
** the need for 'oldpc'; so, 'oldpc' does not need to be initialized
** before. Some exceptional conditions may return to a function without
** updating 'oldpc'. In that case, 'oldpc' may be invalid; if so, it is
** reset to zero. (A wrong but valid 'oldpc' at most causes an extra
** call to a line hook.)
*/
int luaG_traceexec (lua_State *L, const Instruction *pc) {
  CallInfo *ci = L->ci;
  lu_byte mask = L->hookmask;
  const Proto *p = ci_func(ci)->p;
  int counthook;
  /* 'L->oldpc' may be invalid; reset it in this case */
  int oldpc = (L->oldpc < p->sizecode) ? L->oldpc : 0;
  if (!(mask & (LUA_MASKLINE | LUA_MASKCOUNT))) {  /* no hooks? */
    ci->u.l.trap = 0;  /* don't need to stop again */
    return 0;  /* turn off 'trap' */
  }
  pc++;  /* reference is always next instruction */
  ci->u.l.savedpc = pc;  /* save 'pc' */
  counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT));
  if (counthook)
    resethookcount(L);  /* reset count */
  else if (!(mask & LUA_MASKLINE))
    return 1;  /* no line hook and count != 0; nothing to be done now */
  if (ci->callstatus & CIST_HOOKYIELD) {  /* called hook last time? */
    ci->callstatus &= ~CIST_HOOKYIELD;  /* erase mark */
    return 1;  /* do not call hook again (VM yielded, so it did not move) */
  }
  if (!isIT(*(ci->u.l.savedpc - 1)))
    L->top = ci->top;  /* prepare top */
  if (counthook)
    luaD_hook(L, LUA_HOOKCOUNT, -1, 0, 0);  /* call count hook */
  if (mask & LUA_MASKLINE) {
    int npci = pcRel(pc, p);
    if (npci == 0 ||  /* call linehook when enter a new function, */
        pc <= invpcRel(oldpc, p) ||  /* when jump back (loop), or when */
        changedline(p, oldpc, npci)) {  /* enter new line */
      int newline = luaG_getfuncline(p, npci);
      luaD_hook(L, LUA_HOOKLINE, newline, 0, 0);  /* call line hook */
    }
    L->oldpc = npci;  /* 'pc' of last call to line hook */
  }
  if (L->status == LUA_YIELD) {  /* did hook yield? */
    if (counthook)
      L->hookcount = 1;  /* undo decrement to zero */
    ci->u.l.savedpc--;  /* undo increment (resume will increment it again) */
    ci->callstatus |= CIST_HOOKYIELD;  /* mark that it yielded */
    luaD_throw(L, LUA_YIELD);
  }
  return 1;  /* keep 'trap' on */
}
crossvul-cpp_data_bad_4272_0
/* ** $Id: ldebug.c $ ** Debug Interface ** See Copyright Notice in lua.h */ #define ldebug_c #define LUA_CORE #include "lprefix.h" #include <stdarg.h> #include <stddef.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "lcode.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lobject.h" #include "lopcodes.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lvm.h" #define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL) /* inverse of 'pcRel' */ #define invpcRel(pc, p) ((p)->code + (pc) + 1) static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name); static int currentpc (CallInfo *ci) { lua_assert(isLua(ci)); return pcRel(ci->u.l.savedpc, ci_func(ci)->p); } /* ** Get a "base line" to find the line corresponding to an instruction. ** For that, search the array of absolute line info for the largest saved ** instruction smaller or equal to the wanted instruction. A special ** case is when there is no absolute info or the instruction is before ** the first absolute one. */ static int getbaseline (const Proto *f, int pc, int *basepc) { if (f->sizeabslineinfo == 0 || pc < f->abslineinfo[0].pc) { *basepc = -1; /* start from the beginning */ return f->linedefined; } else { unsigned int i; if (pc >= f->abslineinfo[f->sizeabslineinfo - 1].pc) i = f->sizeabslineinfo - 1; /* instruction is after last saved one */ else { /* binary search */ unsigned int j = f->sizeabslineinfo - 1; /* pc < anchorlines[j] */ i = 0; /* abslineinfo[i] <= pc */ while (i < j - 1) { unsigned int m = (j + i) / 2; if (pc >= f->abslineinfo[m].pc) i = m; else j = m; } } *basepc = f->abslineinfo[i].pc; return f->abslineinfo[i].line; } } /* ** Get the line corresponding to instruction 'pc' in function 'f'; ** first gets a base line and from there does the increments until ** the desired instruction. */ int luaG_getfuncline (const Proto *f, int pc) { if (f->lineinfo == NULL) /* no debug information? 
*/ return -1; else { int basepc; int baseline = getbaseline(f, pc, &basepc); while (basepc++ < pc) { /* walk until given instruction */ lua_assert(f->lineinfo[basepc] != ABSLINEINFO); baseline += f->lineinfo[basepc]; /* correct line */ } return baseline; } } static int getcurrentline (CallInfo *ci) { return luaG_getfuncline(ci_func(ci)->p, currentpc(ci)); } /* ** Set 'trap' for all active Lua frames. ** This function can be called during a signal, under "reasonable" ** assumptions. A new 'ci' is completely linked in the list before it ** becomes part of the "active" list, and we assume that pointers are ** atomic; see comment in next function. ** (A compiler doing interprocedural optimizations could, theoretically, ** reorder memory writes in such a way that the list could be ** temporarily broken while inserting a new element. We simply assume it ** has no good reasons to do that.) */ static void settraps (CallInfo *ci) { for (; ci != NULL; ci = ci->previous) if (isLua(ci)) ci->u.l.trap = 1; } /* ** This function can be called during a signal, under "reasonable" ** assumptions. ** Fields 'basehookcount' and 'hookcount' (set by 'resethookcount') ** are for debug only, and it is no problem if they get arbitrary ** values (causes at most one wrong hook call). 'hookmask' is an atomic ** value. We assume that pointers are atomic too (e.g., gcc ensures that ** for all platforms where it runs). Moreover, 'hook' is always checked ** before being called (see 'luaD_hook'). */ LUA_API void lua_sethook (lua_State *L, lua_Hook func, int mask, int count) { if (func == NULL || mask == 0) { /* turn off hooks? 
*/ mask = 0; func = NULL; } L->hook = func; L->basehookcount = count; resethookcount(L); L->hookmask = cast_byte(mask); if (mask) settraps(L->ci); /* to trace inside 'luaV_execute' */ } LUA_API lua_Hook lua_gethook (lua_State *L) { return L->hook; } LUA_API int lua_gethookmask (lua_State *L) { return L->hookmask; } LUA_API int lua_gethookcount (lua_State *L) { return L->basehookcount; } LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) { int status; CallInfo *ci; if (level < 0) return 0; /* invalid (negative) level */ lua_lock(L); for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous) level--; if (level == 0 && ci != &L->base_ci) { /* level found? */ status = 1; ar->i_ci = ci; } else status = 0; /* no such level */ lua_unlock(L); return status; } static const char *upvalname (const Proto *p, int uv) { TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name); if (s == NULL) return "?"; else return getstr(s); } static const char *findvararg (CallInfo *ci, int n, StkId *pos) { if (clLvalue(s2v(ci->func))->p->is_vararg) { int nextra = ci->u.l.nextraargs; if (n <= nextra) { *pos = ci->func - nextra + (n - 1); return "(vararg)"; /* generic name for any vararg */ } } return NULL; /* no such vararg */ } const char *luaG_findlocal (lua_State *L, CallInfo *ci, int n, StkId *pos) { StkId base = ci->func + 1; const char *name = NULL; if (isLua(ci)) { if (n < 0) /* access to vararg values? */ return findvararg(ci, -n, pos); else name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci)); } if (name == NULL) { /* no 'standard' name? */ StkId limit = (ci == L->ci) ? L->top : ci->next->func; if (limit - base >= n && n > 0) { /* is 'n' inside 'ci' stack? */ /* generic name for any valid slot */ name = isLua(ci) ? 
"(temporary)" : "(C temporary)"; } else return NULL; /* no name */ } if (pos) *pos = base + (n - 1); return name; } LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) { const char *name; lua_lock(L); if (ar == NULL) { /* information about non-active function? */ if (!isLfunction(s2v(L->top - 1))) /* not a Lua function? */ name = NULL; else /* consider live variables at function start (parameters) */ name = luaF_getlocalname(clLvalue(s2v(L->top - 1))->p, n, 0); } else { /* active function; get information through 'ar' */ StkId pos = NULL; /* to avoid warnings */ name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, L->top, pos); api_incr_top(L); } } lua_unlock(L); return name; } LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) { StkId pos = NULL; /* to avoid warnings */ const char *name; lua_lock(L); name = luaG_findlocal(L, ar->i_ci, n, &pos); if (name) { setobjs2s(L, pos, L->top - 1); L->top--; /* pop value */ } lua_unlock(L); return name; } static void funcinfo (lua_Debug *ar, Closure *cl) { if (noLuaClosure(cl)) { ar->source = "=[C]"; ar->srclen = LL("=[C]"); ar->linedefined = -1; ar->lastlinedefined = -1; ar->what = "C"; } else { const Proto *p = cl->l.p; if (p->source) { ar->source = getstr(p->source); ar->srclen = tsslen(p->source); } else { ar->source = "=?"; ar->srclen = LL("=?"); } ar->linedefined = p->linedefined; ar->lastlinedefined = p->lastlinedefined; ar->what = (ar->linedefined == 0) ? 
"main" : "Lua"; } luaO_chunkid(ar->short_src, ar->source, ar->srclen); } static int nextline (const Proto *p, int currentline, int pc) { if (p->lineinfo[pc] != ABSLINEINFO) return currentline + p->lineinfo[pc]; else return luaG_getfuncline(p, pc); } static void collectvalidlines (lua_State *L, Closure *f) { if (noLuaClosure(f)) { setnilvalue(s2v(L->top)); api_incr_top(L); } else { int i; TValue v; const Proto *p = f->l.p; int currentline = p->linedefined; Table *t = luaH_new(L); /* new table to store active lines */ sethvalue2s(L, L->top, t); /* push it on stack */ api_incr_top(L); setbtvalue(&v); /* boolean 'true' to be the value of all indices */ for (i = 0; i < p->sizelineinfo; i++) { /* for all lines with code */ currentline = nextline(p, currentline, i); luaH_setint(L, t, currentline, &v); /* table[line] = true */ } } } static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { if (ci == NULL) /* no 'ci'? */ return NULL; /* no info */ else if (ci->callstatus & CIST_FIN) { /* is this a finalizer? */ *name = "__gc"; return "metamethod"; /* report it as such */ } /* calling function is a known Lua function? */ else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous)) return funcnamefromcode(L, ci->previous, name); else return NULL; /* no way to find a name */ } static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar, Closure *f, CallInfo *ci) { int status = 1; for (; *what; what++) { switch (*what) { case 'S': { funcinfo(ar, f); break; } case 'l': { ar->currentline = (ci && isLua(ci)) ? getcurrentline(ci) : -1; break; } case 'u': { ar->nups = (f == NULL) ? 0 : f->c.nupvalues; if (noLuaClosure(f)) { ar->isvararg = 1; ar->nparams = 0; } else { ar->isvararg = f->l.p->is_vararg; ar->nparams = f->l.p->numparams; } break; } case 't': { ar->istailcall = (ci) ? 
ci->callstatus & CIST_TAIL : 0; break; } case 'n': { ar->namewhat = getfuncname(L, ci, &ar->name); if (ar->namewhat == NULL) { ar->namewhat = ""; /* not found */ ar->name = NULL; } break; } case 'r': { if (ci == NULL || !(ci->callstatus & CIST_TRAN)) ar->ftransfer = ar->ntransfer = 0; else { ar->ftransfer = ci->u2.transferinfo.ftransfer; ar->ntransfer = ci->u2.transferinfo.ntransfer; } break; } case 'L': case 'f': /* handled by lua_getinfo */ break; default: status = 0; /* invalid option */ } } return status; } LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) { int status; Closure *cl; CallInfo *ci; TValue *func; lua_lock(L); if (*what == '>') { ci = NULL; func = s2v(L->top - 1); api_check(L, ttisfunction(func), "function expected"); what++; /* skip the '>' */ L->top--; /* pop function */ } else { ci = ar->i_ci; func = s2v(ci->func); lua_assert(ttisfunction(func)); } cl = ttisclosure(func) ? clvalue(func) : NULL; status = auxgetinfo(L, what, ar, cl, ci); if (strchr(what, 'f')) { setobj2s(L, L->top, func); api_incr_top(L); } if (strchr(what, 'L')) collectvalidlines(L, cl); lua_unlock(L); return status; } /* ** {====================================================== ** Symbolic Execution ** ======================================================= */ static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name); /* ** Find a "name" for the constant 'c'. */ static void kname (const Proto *p, int c, const char **name) { TValue *kvalue = &p->k[c]; *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?"; } /* ** Find a "name" for the register 'c'. */ static void rname (const Proto *p, int pc, int c, const char **name) { const char *what = getobjname(p, pc, c, name); /* search for 'c' */ if (!(what && *what == 'c')) /* did not find a constant name? */ *name = "?"; } /* ** Find a "name" for a 'C' value in an RK instruction. 
*/ static void rkname (const Proto *p, int pc, Instruction i, const char **name) { int c = GETARG_C(i); /* key index */ if (GETARG_k(i)) /* is 'c' a constant? */ kname(p, c, name); else /* 'c' is a register */ rname(p, pc, c, name); } static int filterpc (int pc, int jmptarget) { if (pc < jmptarget) /* is code conditional (inside a jump)? */ return -1; /* cannot know who sets that register */ else return pc; /* current position sets that register */ } /* ** Try to find last instruction before 'lastpc' that modified register 'reg'. */ static int findsetreg (const Proto *p, int lastpc, int reg) { int pc; int setreg = -1; /* keep last instruction that changed 'reg' */ int jmptarget = 0; /* any code before this address is conditional */ if (testMMMode(GET_OPCODE(p->code[lastpc]))) lastpc--; /* previous instruction was not actually executed */ for (pc = 0; pc < lastpc; pc++) { Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); int a = GETARG_A(i); int change; /* true if current instruction changed 'reg' */ switch (op) { case OP_LOADNIL: { /* set registers from 'a' to 'a+b' */ int b = GETARG_B(i); change = (a <= reg && reg <= a + b); break; } case OP_TFORCALL: { /* affect all regs above its base */ change = (reg >= a + 2); break; } case OP_CALL: case OP_TAILCALL: { /* affect all registers above base */ change = (reg >= a); break; } case OP_JMP: { /* doesn't change registers, but changes 'jmptarget' */ int b = GETARG_sJ(i); int dest = pc + 1 + b; /* jump does not skip 'lastpc' and is larger than current one? 
*/ if (dest <= lastpc && dest > jmptarget) jmptarget = dest; /* update 'jmptarget' */ change = 0; break; } default: /* any instruction that sets A */ change = (testAMode(op) && reg == a); break; } if (change) setreg = filterpc(pc, jmptarget); } return setreg; } /* ** Check whether table being indexed by instruction 'i' is the ** environment '_ENV' */ static const char *gxf (const Proto *p, int pc, Instruction i, int isup) { int t = GETARG_B(i); /* table index */ const char *name; /* name of indexed variable */ if (isup) /* is an upvalue? */ name = upvalname(p, t); else getobjname(p, pc, t, &name); return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field"; } static const char *getobjname (const Proto *p, int lastpc, int reg, const char **name) { int pc; *name = luaF_getlocalname(p, reg + 1, lastpc); if (*name) /* is a local? */ return "local"; /* else try symbolic execution */ pc = findsetreg(p, lastpc, reg); if (pc != -1) { /* could find instruction? */ Instruction i = p->code[pc]; OpCode op = GET_OPCODE(i); switch (op) { case OP_MOVE: { int b = GETARG_B(i); /* move from 'b' to 'a' */ if (b < GETARG_A(i)) return getobjname(p, pc, b, name); /* get name for 'b' */ break; } case OP_GETTABUP: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 1); } case OP_GETTABLE: { int k = GETARG_C(i); /* key index */ rname(p, pc, k, name); return gxf(p, pc, i, 0); } case OP_GETI: { *name = "integer index"; return "field"; } case OP_GETFIELD: { int k = GETARG_C(i); /* key index */ kname(p, k, name); return gxf(p, pc, i, 0); } case OP_GETUPVAL: { *name = upvalname(p, GETARG_B(i)); return "upvalue"; } case OP_LOADK: case OP_LOADKX: { int b = (op == OP_LOADK) ? 
GETARG_Bx(i) : GETARG_Ax(p->code[pc + 1]); if (ttisstring(&p->k[b])) { *name = svalue(&p->k[b]); return "constant"; } break; } case OP_SELF: { rkname(p, pc, i, name); return "method"; } default: break; /* go through to return NULL */ } } return NULL; /* could not find reasonable name */ } /* ** Try to find a name for a function based on the code that called it. ** (Only works when function was called by a Lua function.) ** Returns what the name is (e.g., "for iterator", "method", ** "metamethod") and sets '*name' to point to the name. */ static const char *funcnamefromcode (lua_State *L, CallInfo *ci, const char **name) { TMS tm = (TMS)0; /* (initial value avoids warnings) */ const Proto *p = ci_func(ci)->p; /* calling function */ int pc = currentpc(ci); /* calling instruction index */ Instruction i = p->code[pc]; /* calling instruction */ if (ci->callstatus & CIST_HOOKED) { /* was it called inside a hook? */ *name = "?"; return "hook"; } switch (GET_OPCODE(i)) { case OP_CALL: case OP_TAILCALL: return getobjname(p, pc, GETARG_A(i), name); /* get function name */ case OP_TFORCALL: { /* for iterator */ *name = "for iterator"; return "for iterator"; } /* other instructions can do calls through metamethods */ case OP_SELF: case OP_GETTABUP: case OP_GETTABLE: case OP_GETI: case OP_GETFIELD: tm = TM_INDEX; break; case OP_SETTABUP: case OP_SETTABLE: case OP_SETI: case OP_SETFIELD: tm = TM_NEWINDEX; break; case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: { tm = cast(TMS, GETARG_C(i)); break; } case OP_UNM: tm = TM_UNM; break; case OP_BNOT: tm = TM_BNOT; break; case OP_LEN: tm = TM_LEN; break; case OP_CONCAT: tm = TM_CONCAT; break; case OP_EQ: tm = TM_EQ; break; case OP_LT: case OP_LE: case OP_LTI: case OP_LEI: *name = "order"; /* '<=' can call '__lt', etc. 
*/ return "metamethod"; case OP_CLOSE: case OP_RETURN: *name = "close"; return "metamethod"; default: return NULL; /* cannot find a reasonable name */ } *name = getstr(G(L)->tmname[tm]) + 2; return "metamethod"; } /* }====================================================== */ /* ** The subtraction of two potentially unrelated pointers is ** not ISO C, but it should not crash a program; the subsequent ** checks are ISO C and ensure a correct result. */ static int isinstack (CallInfo *ci, const TValue *o) { StkId base = ci->func + 1; ptrdiff_t i = cast(StkId, o) - base; return (0 <= i && i < (ci->top - base) && s2v(base + i) == o); } /* ** Checks whether value 'o' came from an upvalue. (That can only happen ** with instructions OP_GETTABUP/OP_SETTABUP, which operate directly on ** upvalues.) */ static const char *getupvalname (CallInfo *ci, const TValue *o, const char **name) { LClosure *c = ci_func(ci); int i; for (i = 0; i < c->nupvalues; i++) { if (c->upvals[i]->v == o) { *name = upvalname(c->p, i); return "upvalue"; } } return NULL; } static const char *varinfo (lua_State *L, const TValue *o) { const char *name = NULL; /* to avoid warnings */ CallInfo *ci = L->ci; const char *kind = NULL; if (isLua(ci)) { kind = getupvalname(ci, o, &name); /* check whether 'o' is an upvalue */ if (!kind && isinstack(ci, o)) /* no? try a register */ kind = getobjname(ci_func(ci)->p, currentpc(ci), cast_int(cast(StkId, o) - (ci->func + 1)), &name); } return (kind) ? 
luaO_pushfstring(L, " (%s '%s')", kind, name) : ""; } l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) { const char *t = luaT_objtypename(L, o); luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o)); } l_noret luaG_forerror (lua_State *L, const TValue *o, const char *what) { luaG_runerror(L, "bad 'for' %s (number expected, got %s)", what, luaT_objtypename(L, o)); } l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) { if (ttisstring(p1) || cvt2str(p1)) p1 = p2; luaG_typeerror(L, p1, "concatenate"); } l_noret luaG_opinterror (lua_State *L, const TValue *p1, const TValue *p2, const char *msg) { if (!ttisnumber(p1)) /* first operand is wrong? */ p2 = p1; /* now second is wrong */ luaG_typeerror(L, p2, msg); } /* ** Error when both values are convertible to numbers, but not to integers */ l_noret luaG_tointerror (lua_State *L, const TValue *p1, const TValue *p2) { lua_Integer temp; if (!tointegerns(p1, &temp)) p2 = p1; luaG_runerror(L, "number%s has no integer representation", varinfo(L, p2)); } l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) { const char *t1 = luaT_objtypename(L, p1); const char *t2 = luaT_objtypename(L, p2); if (strcmp(t1, t2) == 0) luaG_runerror(L, "attempt to compare two %s values", t1); else luaG_runerror(L, "attempt to compare %s with %s", t1, t2); } /* add src:line information to 'msg' */ const char *luaG_addinfo (lua_State *L, const char *msg, TString *src, int line) { char buff[LUA_IDSIZE]; if (src) luaO_chunkid(buff, getstr(src), tsslen(src)); else { /* no source available; use "?" instead */ buff[0] = '?'; buff[1] = '\0'; } return luaO_pushfstring(L, "%s:%d: %s", buff, line, msg); } l_noret luaG_errormsg (lua_State *L) { if (L->errfunc != 0) { /* is there an error handling function? 
*/ StkId errfunc = restorestack(L, L->errfunc); lua_assert(ttisfunction(s2v(errfunc))); setobjs2s(L, L->top, L->top - 1); /* move argument */ setobjs2s(L, L->top - 1, errfunc); /* push function */ L->top++; /* assume EXTRA_STACK */ luaD_callnoyield(L, L->top - 2, 1); /* call it */ } luaD_throw(L, LUA_ERRRUN); } l_noret luaG_runerror (lua_State *L, const char *fmt, ...) { CallInfo *ci = L->ci; const char *msg; va_list argp; luaC_checkGC(L); /* error message uses memory */ va_start(argp, fmt); msg = luaO_pushvfstring(L, fmt, argp); /* format message */ va_end(argp); if (isLua(ci)) /* if Lua function, add source:line information */ luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci)); luaG_errormsg(L); } /* ** Check whether new instruction 'newpc' is in a different line from ** previous instruction 'oldpc'. */ static int changedline (const Proto *p, int oldpc, int newpc) { while (oldpc++ < newpc) { if (p->lineinfo[oldpc] != 0) return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc)); } return 0; /* no line changes in the way */ } /* ** Traces the execution of a Lua function. Called before the execution ** of each opcode, when debug is on. 'L->oldpc' stores the last ** instruction traced, to detect line changes. When entering a new ** function, 'npci' will be zero and will test as a new line without ** the need for 'oldpc'; so, 'oldpc' does not need to be initialized ** before. Some exceptional conditions may return to a function without ** updating 'oldpc'. In that case, 'oldpc' may be invalid; if so, it is ** reset to zero. (A wrong but valid 'oldpc' at most causes an extra ** call to a line hook.) */ int luaG_traceexec (lua_State *L, const Instruction *pc) { CallInfo *ci = L->ci; lu_byte mask = L->hookmask; const Proto *p = ci_func(ci)->p; int counthook; /* 'L->oldpc' may be invalid; reset it in this case */ int oldpc = (L->oldpc < p->sizecode) ? L->oldpc : 0; if (!(mask & (LUA_MASKLINE | LUA_MASKCOUNT))) { /* no hooks? 
*/ ci->u.l.trap = 0; /* don't need to stop again */ return 0; /* turn off 'trap' */ } pc++; /* reference is always next instruction */ ci->u.l.savedpc = pc; /* save 'pc' */ counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT)); if (counthook) resethookcount(L); /* reset count */ else if (!(mask & LUA_MASKLINE)) return 1; /* no line hook and count != 0; nothing to be done now */ if (ci->callstatus & CIST_HOOKYIELD) { /* called hook last time? */ ci->callstatus &= ~CIST_HOOKYIELD; /* erase mark */ return 1; /* do not call hook again (VM yielded, so it did not move) */ } if (!isIT(*(ci->u.l.savedpc - 1))) L->top = ci->top; /* prepare top */ if (counthook) luaD_hook(L, LUA_HOOKCOUNT, -1, 0, 0); /* call count hook */ if (mask & LUA_MASKLINE) { int npci = pcRel(pc, p); if (npci == 0 || /* call linehook when enter a new function, */ pc <= invpcRel(oldpc, p) || /* when jump back (loop), or when */ changedline(p, oldpc, npci)) { /* enter new line */ int newline = luaG_getfuncline(p, npci); luaD_hook(L, LUA_HOOKLINE, newline, 0, 0); /* call line hook */ } L->oldpc = npci; /* 'pc' of last call to line hook */ } if (L->status == LUA_YIELD) { /* did hook yield? */ if (counthook) L->hookcount = 1; /* undo decrement to zero */ ci->u.l.savedpc--; /* undo increment (resume will increment it again) */ ci->callstatus |= CIST_HOOKYIELD; /* mark that it yielded */ luaD_throw(L, LUA_YIELD); } return 1; /* keep 'trap' on */ }
./CrossVul/dataset_final_sorted/CWE-191/c/bad_4272_0
crossvul-cpp_data_good_4828_0
/* * The two pass scaling function is based on: * Filtered Image Rescaling * Based on Gems III * - Schumacher general filtered image rescaling * (pp. 414-424) * by Dale Schumacher * * Additional changes by Ray Gardener, Daylon Graphics Ltd. * December 4, 1999 * * Ported to libgd by Pierre Joye. Support for multiple channels * added (argb for now). * * Initial sources code is avaibable in the Gems Source Code Packages: * http://www.acm.org/pubs/tog/GraphicsGems/GGemsIII.tar.gz * */ /* Summary: - Horizontal filter contributions are calculated on the fly, as each column is mapped from src to dst image. This lets us omit having to allocate a temporary full horizontal stretch of the src image. - If none of the src pixels within a sampling region differ, then the output pixel is forced to equal (any of) the source pixel. This ensures that filters do not corrupt areas of constant color. - Filter weight contribution results, after summing, are rounded to the nearest pixel color value instead of being casted to ILubyte (usually an int or char). Otherwise, artifacting occurs. */ /* Additional functions are available for simple rotation or up/downscaling. downscaling using the fixed point implementations are usually much faster than the existing gdImageCopyResampled while having a similar or better quality. For image rotations, the optimized versions have a lazy antialiasing for the edges of the images. For a much better antialiased result, the affine function is recommended. */ /* TODO: - Optimize pixel accesses and loops once we have continuous buffer - Add scale support for a portion only of an image (equivalent of copyresized/resampled) */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #undef NDEBUG /* Comment out this line to enable asserts. * TODO: This logic really belongs in cmake and configure. 
*/ #define NDEBUG 1 #include <assert.h> #include "gd.h" #include "gdhelpers.h" #include "gd_intern.h" #ifdef _MSC_VER # pragma optimize("t", on) # include <emmintrin.h> #endif static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height); static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height); static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height); static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor); static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor); #define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x))) /* only used here, let do a generic fixed point integers later if required by other part of GD */ typedef long gdFixed; /* Integer to fixed point */ #define gd_itofx(x) ((x) << 8) /* Float to fixed point */ #define gd_ftofx(x) (long)((x) * 256) /* Double to fixed point */ #define gd_dtofx(x) (long)((x) * 256) /* Fixed point to integer */ #define gd_fxtoi(x) ((x) >> 8) /* Fixed point to float */ # define gd_fxtof(x) ((float)(x) / 256) /* Fixed point to double */ #define gd_fxtod(x) ((double)(x) / 256) /* Multiply a fixed by a fixed */ #define gd_mulfx(x,y) (((x) * (y)) >> 8) /* Divide a fixed by a fixed */ #define gd_divfx(x,y) (((x) << 8) / (y)) typedef struct { double *Weights; /* Normalized weights of neighboring pixels */ int Left,Right; /* Bounds of source pixels window */ } ContributionType; /* Contirbution information for a single pixel */ typedef struct { ContributionType *ContribRow; /* Row (or column) of contribution weights */ unsigned int WindowSize, /* Filter window size (of affecting source pixels) */ LineLength; /* Length of line (no. 
or rows / cols) */ } LineContribType; /* Each core filter has its own radius */ #define DEFAULT_FILTER_LINEAR 1.0f #define DEFAULT_FILTER_BICUBIC 3.0f #define DEFAULT_FILTER_BOX 0.5f #define DEFAULT_FILTER_GENERALIZED_CUBIC 0.5f #define DEFAULT_FILTER_RADIUS 1.0f #define DEFAULT_LANCZOS8_RADIUS 8.0f #define DEFAULT_LANCZOS3_RADIUS 3.0f #define DEFAULT_HERMITE_RADIUS 1.0f #define DEFAULT_BOX_RADIUS 0.5f #define DEFAULT_TRIANGLE_RADIUS 1.0f #define DEFAULT_BELL_RADIUS 1.5f #define DEFAULT_CUBICSPLINE_RADIUS 2.0f #define DEFAULT_MITCHELL_RADIUS 2.0f #define DEFAULT_COSINE_RADIUS 1.0f #define DEFAULT_CATMULLROM_RADIUS 2.0f #define DEFAULT_QUADRATIC_RADIUS 1.5f #define DEFAULT_QUADRATICBSPLINE_RADIUS 1.5f #define DEFAULT_CUBICCONVOLUTION_RADIUS 3.0f #define DEFAULT_GAUSSIAN_RADIUS 1.0f #define DEFAULT_HANNING_RADIUS 1.0f #define DEFAULT_HAMMING_RADIUS 1.0f #define DEFAULT_SINC_RADIUS 1.0f #define DEFAULT_WELSH_RADIUS 1.0f static double KernelBessel_J1(const double x) { double p, q; register long i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p = Pone[8]; q = Qone[8]; for (i=7; i >= 0; i--) { p = p*x*x+Pone[i]; q = q*x*x+Qone[i]; } return (double)(p/q); } static double KernelBessel_P1(const double x) { double p, q; register long i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 
0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Q1(const double x) { double p, q; register long i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); } static double KernelBessel_Order1(double x) { double p, q; if (x == 0.0) return (0.0f); p = x; if (x < 0.0) x=(-x); if (x < 8.0) return (p*KernelBessel_J1(x)); q = (double)sqrt(2.0f/(M_PI*x))*(double)(KernelBessel_P1(x)*(1.0f/sqrt(2.0f)*(sin(x)-cos(x)))-8.0f/x*KernelBessel_Q1(x)* (-1.0f/sqrt(2.0f)*(sin(x)+cos(x)))); if (p < 0.0f) q = (-q); return (q); } static double filter_bessel(const double x) { if (x == 0.0f) return (double)(M_PI/4.0f); return (KernelBessel_Order1((double)M_PI*x)/(2.0f*x)); } static double filter_blackman(const double x) { return (0.42f+0.5f*(double)cos(M_PI*x)+0.08f*(double)cos(2.0f*M_PI*x)); } double filter_linear(const double x) { double ax = fabs(x); if (ax < 1.0f) { return (1.0f - ax); } return 0.0f; } /** * Bicubic interpolation kernel (a=-1): \verbatim / | 1-2|t|**2+|t|**3 , if |t| < 1 h(t) = | 4-8|t|+5|t|**2-|t|**3 , if 1<=|t|<2 | 0 , otherwise \ \endverbatim * ***bd*** 2.2004 */ static double 
filter_bicubic(const double t) { const double abs_t = (double)fabs(t); const double abs_t_sq = abs_t * abs_t; if (abs_t<1) return 1-2*abs_t_sq+abs_t_sq*abs_t; if (abs_t<2) return 4 - 8*abs_t +5*abs_t_sq - abs_t_sq*abs_t; return 0; } /** * Generalized cubic kernel (for a=-1 it is the same as BicubicKernel): \verbatim / | (a+2)|t|**3 - (a+3)|t|**2 + 1 , |t| <= 1 h(t) = | a|t|**3 - 5a|t|**2 + 8a|t| - 4a , 1 < |t| <= 2 | 0 , otherwise \ \endverbatim * Often used values for a are -1 and -1/2. */ static double filter_generalized_cubic(const double t) { const double a = -DEFAULT_FILTER_GENERALIZED_CUBIC; double abs_t = (double)fabs(t); double abs_t_sq = abs_t * abs_t; if (abs_t < 1) return (a + 2) * abs_t_sq * abs_t - (a + 3) * abs_t_sq + 1; if (abs_t < 2) return a * abs_t_sq * abs_t - 5 * a * abs_t_sq + 8 * a * abs_t - 4 * a; return 0; } #ifdef FUNCTION_NOT_USED_YET /* CubicSpline filter, default radius 2 */ static double filter_cubic_spline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0 ) { const double x2 = x*x; return (0.5 * x2 * x - x2 + 2.0 / 3.0); } if (x < 2.0) { return (pow(2.0 - x, 3.0)/6.0); } return 0; } #endif #ifdef FUNCTION_NOT_USED_YET /* CubicConvolution filter, default radius 3 */ static double filter_cubic_convolution(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; const double x2 = x1 * x1; const double x2_x = x2 * x; if (x <= 1.0) return ((4.0 / 3.0)* x2_x - (7.0 / 3.0) * x2 + 1.0); if (x <= 2.0) return (- (7.0 / 12.0) * x2_x + 3 * x2 - (59.0 / 12.0) * x + 2.5); if (x <= 3.0) return ( (1.0/12.0) * x2_x - (2.0 / 3.0) * x2 + 1.75 * x - 1.5); return 0; } #endif static double filter_box(double x) { if (x < - DEFAULT_FILTER_BOX) return 0.0f; if (x < DEFAULT_FILTER_BOX) return 1.0f; return 0.0f; } static double filter_catmullrom(const double x) { if (x < -2.0) return(0.0f); if (x < -1.0) return(0.5f*(4.0f+x*(8.0f+x*(5.0f+x)))); if (x < 0.0) return(0.5f*(2.0f+x*x*(-5.0f-3.0f*x))); if (x < 1.0) return(0.5f*(2.0f+x*x*(-5.0f+3.0f*x))); if (x < 2.0) return(0.5f*(4.0f+x*(-8.0f+x*(5.0f-x)))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET static double filter_filter(double t) { /* f(t) = 2|t|^3 - 3|t|^2 + 1, -1 <= t <= 1 */ if(t < 0.0) t = -t; if(t < 1.0) return((2.0 * t - 3.0) * t * t + 1.0); return(0.0); } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos8 filter, default radius 8 */ static double filter_lanczos8(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS8_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI/ R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif #ifdef FUNCTION_NOT_USED_YET /* Lanczos3 filter, default radius 3 */ static double filter_lanczos3(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; #define R DEFAULT_LANCZOS3_RADIUS if ( x == 0.0) return 1; if ( x < R) { return R * sin(x*M_PI) * sin(x * M_PI / R) / (x * M_PI * x * M_PI); } return 0.0; #undef R } #endif /* Hermite filter, default radius 1 */ static double filter_hermite(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 1.0) return ((2.0 * x - 3) * x * x + 1.0 ); return 0.0; } /* Trangle filter, default radius 1 */ static double filter_triangle(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; if (x < 1.0) return (1.0 - x); return 0.0; } /* Bell filter, default radius 1.5 */ static double filter_bell(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x < 0.5) return (0.75 - x*x); if (x < 1.5) return (0.5 * pow(x - 1.5, 2.0)); return 0.0; } /* Mitchell filter, default radius 2.0 */ static double filter_mitchell(const double x) { #define KM_B (1.0f/3.0f) #define KM_C (1.0f/3.0f) #define KM_P0 (( 6.0f - 2.0f * KM_B ) / 6.0f) #define KM_P2 ((-18.0f + 12.0f * KM_B + 6.0f * KM_C) / 6.0f) #define KM_P3 (( 12.0f - 9.0f * KM_B - 6.0f * KM_C) / 6.0f) #define KM_Q0 (( 8.0f * KM_B + 24.0f * KM_C) / 6.0f) #define KM_Q1 ((-12.0f * KM_B - 48.0f * KM_C) / 6.0f) #define KM_Q2 (( 6.0f * KM_B + 30.0f * KM_C) / 6.0f) #define KM_Q3 (( -1.0f * KM_B - 6.0f * KM_C) / 6.0f) if (x < -2.0) return(0.0f); if (x < -1.0) return(KM_Q0-x*(KM_Q1-x*(KM_Q2-x*KM_Q3))); if (x < 0.0f) return(KM_P0+x*x*(KM_P2-x*KM_P3)); if (x < 1.0f) return(KM_P0+x*x*(KM_P2+x*KM_P3)); if (x < 2.0f) return(KM_Q0+x*(KM_Q1+x*(KM_Q2+x*KM_Q3))); return(0.0f); } #ifdef FUNCTION_NOT_USED_YET /* Cosine filter, default radius 1 */ static double filter_cosine(const double x) { if ((x >= -1.0) && (x <= 1.0)) return ((cos(x * M_PI) + 1.0)/2.0); return 0; } #endif /* Quadratic filter, default radius 1.5 */ static double filter_quadratic(const double x1) { const double x = x1 < 0.0 ? 
-x1 : x1; if (x <= 0.5) return (- 2.0 * x * x + 1); if (x <= 1.5) return (x * x - 2.5* x + 1.5); return 0.0; } static double filter_bspline(const double x) { if (x>2.0f) { return 0.0f; } else { double a, b, c, d; /* Was calculated anyway cause the "if((x-1.0f) < 0)" */ const double xm1 = x - 1.0f; const double xp1 = x + 1.0f; const double xp2 = x + 2.0f; if ((xp2) <= 0.0f) a = 0.0f; else a = xp2*xp2*xp2; if ((xp1) <= 0.0f) b = 0.0f; else b = xp1*xp1*xp1; if (x <= 0) c = 0.0f; else c = x*x*x; if ((xm1) <= 0.0f) d = 0.0f; else d = xm1*xm1*xm1; return (0.16666666666666666667f * (a - (4.0f * b) + (6.0f * c) - (4.0f * d))); } } #ifdef FUNCTION_NOT_USED_YET /* QuadraticBSpline filter, default radius 1.5 */ static double filter_quadratic_bspline(const double x1) { const double x = x1 < 0.0 ? -x1 : x1; if (x <= 0.5) return (- x * x + 0.75); if (x <= 1.5) return (0.5 * x * x - 1.5 * x + 1.125); return 0.0; } #endif static double filter_gaussian(const double x) { /* return(exp((double) (-2.0 * x * x)) * sqrt(2.0 / M_PI)); */ return (double)(exp(-2.0f * x * x) * 0.79788456080287f); } static double filter_hanning(const double x) { /* A Cosine windowing function */ return(0.5 + 0.5 * cos(M_PI * x)); } static double filter_hamming(const double x) { /* should be (0.54+0.46*cos(M_PI*(double) x)); but this approximation is sufficient */ if (x < -1.0f) return 0.0f; if (x < 0.0f) return 0.92f*(-2.0f*x-3.0f)*x*x+1.0f; if (x < 1.0f) return 0.92f*(2.0f*x-3.0f)*x*x+1.0f; return 0.0f; } static double filter_power(const double x) { const double a = 2.0f; if (fabs(x)>1) return 0.0f; return (1.0f - (double)fabs(pow(x,a))); } static double filter_sinc(const double x) { /* X-scaled Sinc(x) function. 
*/ if (x == 0.0) return(1.0); return (sin(M_PI * (double) x) / (M_PI * (double) x)); } #ifdef FUNCTION_NOT_USED_YET static double filter_welsh(const double x) { /* Welsh parabolic windowing filter */ if (x < 1.0) return(1 - x*x); return(0.0); } #endif #if defined(_MSC_VER) && !defined(inline) # define inline __inline #endif /* keep it for future usage for affine copy over an existing image, targetting fix for 2.2.2 */ #ifdef FUNCTION_NOT_USED_YET /* Copied from upstream's libgd */ static inline int _color_blend (const int dst, const int src) { const int src_alpha = gdTrueColorGetAlpha(src); if( src_alpha == gdAlphaOpaque ) { return src; } else { const int dst_alpha = gdTrueColorGetAlpha(dst); if( src_alpha == gdAlphaTransparent ) return dst; if( dst_alpha == gdAlphaTransparent ) { return src; } else { register int alpha, red, green, blue; const int src_weight = gdAlphaTransparent - src_alpha; const int dst_weight = (gdAlphaTransparent - dst_alpha) * src_alpha / gdAlphaMax; const int tot_weight = src_weight + dst_weight; alpha = src_alpha * dst_alpha / gdAlphaMax; red = (gdTrueColorGetRed(src) * src_weight + gdTrueColorGetRed(dst) * dst_weight) / tot_weight; green = (gdTrueColorGetGreen(src) * src_weight + gdTrueColorGetGreen(dst) * dst_weight) / tot_weight; blue = (gdTrueColorGetBlue(src) * src_weight + gdTrueColorGetBlue(dst) * dst_weight) / tot_weight; return ((alpha << 24) + (red << 16) + (green << 8) + blue); } } } static inline int _setEdgePixel(const gdImagePtr src, unsigned int x, unsigned int y, gdFixed coverage, const int bgColor) { const gdFixed f_127 = gd_itofx(127); register int c = src->tpixels[y][x]; c = c | (( (int) (gd_fxtof(gd_mulfx(coverage, f_127)) + 50.5f)) << 24); return _color_blend(bgColor, c); } #endif static inline int getPixelOverflowTC(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? 
gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return c; } else { return bgColor; } } #define colorIndex2RGBA(c) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(c)]) #define colorIndex2RGBcustomA(c, a) gdTrueColorAlpha(im->red[(c)], im->green[(c)], im->blue[(c)], im->alpha[(a)]) static inline int getPixelOverflowPalette(gdImagePtr im, const int x, const int y, const int bgColor) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->pixels[y][x]; if (c == im->transparent) { return bgColor == -1 ? gdTrueColorAlpha(0, 0, 0, 127) : bgColor; } return colorIndex2RGBA(c); } else { return bgColor; } } static int getPixelInterpolateWeight(gdImagePtr im, const double x, const double y, const int bgColor) { /* Closest pixel <= (xf,yf) */ int sx = (int)(x); int sy = (int)(y); const double xf = x - (double)sx; const double yf = y - (double)sy; const double nxf = (double) 1.0 - xf; const double nyf = (double) 1.0 - yf; const double m1 = xf * yf; const double m2 = nxf * yf; const double m3 = xf * nyf; const double m4 = nxf * nyf; /* get color values of neighbouring pixels */ const int c1 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy, bgColor) : getPixelOverflowPalette(im, sx, sy, bgColor); const int c2 = im->trueColor == 1 ? getPixelOverflowTC(im, sx - 1, sy, bgColor) : getPixelOverflowPalette(im, sx - 1, sy, bgColor); const int c3 = im->trueColor == 1 ? getPixelOverflowTC(im, sx, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); const int c4 = im->trueColor == 1 ? 
getPixelOverflowTC(im, sx - 1, sy - 1, bgColor) : getPixelOverflowPalette(im, sx, sy - 1, bgColor); int r, g, b, a; if (x < 0) sx--; if (y < 0) sy--; /* component-wise summing-up of color values */ if (im->trueColor) { r = (int)(m1*gdTrueColorGetRed(c1) + m2*gdTrueColorGetRed(c2) + m3*gdTrueColorGetRed(c3) + m4*gdTrueColorGetRed(c4)); g = (int)(m1*gdTrueColorGetGreen(c1) + m2*gdTrueColorGetGreen(c2) + m3*gdTrueColorGetGreen(c3) + m4*gdTrueColorGetGreen(c4)); b = (int)(m1*gdTrueColorGetBlue(c1) + m2*gdTrueColorGetBlue(c2) + m3*gdTrueColorGetBlue(c3) + m4*gdTrueColorGetBlue(c4)); a = (int)(m1*gdTrueColorGetAlpha(c1) + m2*gdTrueColorGetAlpha(c2) + m3*gdTrueColorGetAlpha(c3) + m4*gdTrueColorGetAlpha(c4)); } else { r = (int)(m1*im->red[(c1)] + m2*im->red[(c2)] + m3*im->red[(c3)] + m4*im->red[(c4)]); g = (int)(m1*im->green[(c1)] + m2*im->green[(c2)] + m3*im->green[(c3)] + m4*im->green[(c4)]); b = (int)(m1*im->blue[(c1)] + m2*im->blue[(c2)] + m3*im->blue[(c3)] + m4*im->blue[(c4)]); a = (int)(m1*im->alpha[(c1)] + m2*im->alpha[(c2)] + m3*im->alpha[(c3)] + m4*im->alpha[(c4)]); } r = CLAMP(r, 0, 255); g = CLAMP(g, 0, 255); b = CLAMP(b, 0, 255); a = CLAMP(a, 0, gdAlphaMax); return gdTrueColorAlpha(r, g, b, a); } /** * InternalFunction: getPixelInterpolated * Returns the interpolated color value using the default interpolation * method. The returned color is always in the ARGB format (truecolor). 
 *
 * Parameters:
 *  im - Image to interpolate from
 *  x - X value of the ideal position
 *  y - Y value of the ideal position
 *  bgColor - Color used for samples that fall outside the image
 *
 * Returns:
 *  The interpolated ARGB color, or -1 when no interpolation method applies.
 *
 * See also:
 *  <gdSetInterpolationMethod>
 */
int getPixelInterpolated(gdImagePtr im, const double x, const double y, const int bgColor)
{
	const int xi=(int)(x);
	const int yi=(int)(y);
	int yii;
	int i;
	double kernel, kernel_cache_y;
	/* NOTE(review): only kernel_x[0..3] are ever used below; the [12] size
	 * looks like a leftover -- confirm before shrinking. */
	double kernel_x[12], kernel_y[4];
	double new_r = 0.0f, new_g = 0.0f, new_b = 0.0f, new_a = 0.0f;

	/* These methods use special implementations */
	if (im->interpolation_id == GD_NEAREST_NEIGHBOUR) {
		return -1;
	}

	if (im->interpolation_id == GD_WEIGHTED4) {
		return getPixelInterpolateWeight(im, x, y, bgColor);
	}

	/* NOTE(review): dead code -- the identical check above already returned
	 * -1 for GD_NEAREST_NEIGHBOUR, so this branch is unreachable; confirm
	 * which behaviour was intended before removing either copy. */
	if (im->interpolation_id == GD_NEAREST_NEIGHBOUR) {
		if (im->trueColor == 1) {
			return getPixelOverflowTC(im, xi, yi, bgColor);
		} else {
			return getPixelOverflowPalette(im, xi, yi, bgColor);
		}
	}

	/* Evaluate the configured 4-tap kernel at the fractional offsets of
	 * (x, y) on both axes. */
	if (im->interpolation) {
		for (i=0; i<4; i++) {
			kernel_x[i] = (double) im->interpolation((double)(xi+i-1-x));
			kernel_y[i] = (double) im->interpolation((double)(yi+i-1-y));
		}
	} else {
		return -1;
	}

	/*
	 * TODO: use the known fast rgba multiplication implementation once
	 * the new formats are in place
	 */
	/* Weighted sum over the 4x4 neighbourhood around (xi, yi). */
	for (yii = yi-1; yii < yi+3; yii++) {
		int xii;
		kernel_cache_y = kernel_y[yii-(yi-1)];
		if (im->trueColor) {
			for (xii=xi-1; xii<xi+3; xii++) {
				const int rgbs = getPixelOverflowTC(im, xii, yii, bgColor);
				kernel = kernel_cache_y * kernel_x[xii-(xi-1)];
				new_r += kernel * gdTrueColorGetRed(rgbs);
				new_g += kernel * gdTrueColorGetGreen(rgbs);
				new_b += kernel * gdTrueColorGetBlue(rgbs);
				new_a += kernel * gdTrueColorGetAlpha(rgbs);
			}
		} else {
			for (xii=xi-1; xii<xi+3; xii++) {
				const int rgbs = getPixelOverflowPalette(im, xii, yii, bgColor);
				kernel = kernel_cache_y * kernel_x[xii-(xi-1)];
				new_r += kernel * gdTrueColorGetRed(rgbs);
				new_g += kernel * gdTrueColorGetGreen(rgbs);
				new_b += kernel *
gdTrueColorGetBlue(rgbs); new_a += kernel * gdTrueColorGetAlpha(rgbs); } } } new_r = CLAMP(new_r, 0, 255); new_g = CLAMP(new_g, 0, 255); new_b = CLAMP(new_b, 0, 255); new_a = CLAMP(new_a, 0, gdAlphaMax); return gdTrueColorAlpha(((int)new_r), ((int)new_g), ((int)new_b), ((int)new_a)); } static inline LineContribType * _gdContributionsAlloc(unsigned int line_length, unsigned int windows_size) { unsigned int u = 0; LineContribType *res; size_t weights_size; if (overflow2(windows_size, sizeof(double))) { return NULL; } else { weights_size = windows_size * sizeof(double); } res = (LineContribType *) gdMalloc(sizeof(LineContribType)); if (!res) { return NULL; } res->WindowSize = windows_size; res->LineLength = line_length; if (overflow2(line_length, sizeof(ContributionType))) { gdFree(res); return NULL; } res->ContribRow = (ContributionType *) gdMalloc(line_length * sizeof(ContributionType)); if (res->ContribRow == NULL) { gdFree(res); return NULL; } for (u = 0 ; u < line_length ; u++) { res->ContribRow[u].Weights = (double *) gdMalloc(weights_size); if (res->ContribRow[u].Weights == NULL) { unsigned int i; for (i=0;i<u;i++) { gdFree(res->ContribRow[i].Weights); } gdFree(res->ContribRow); gdFree(res); return NULL; } } return res; } static inline void _gdContributionsFree(LineContribType * p) { unsigned int u; for (u = 0; u < p->LineLength; u++) { gdFree(p->ContribRow[u].Weights); } gdFree(p->ContribRow); gdFree(p); } static inline LineContribType *_gdContributionsCalc(unsigned int line_size, unsigned int src_size, double scale_d, const interpolation_method pFilter) { double width_d; double scale_f_d = 1.0; const double filter_width_d = DEFAULT_BOX_RADIUS; int windows_size; unsigned int u; LineContribType *res; if (scale_d < 1.0) { width_d = filter_width_d / scale_d; scale_f_d = scale_d; } else { width_d= filter_width_d; } windows_size = 2 * (int)ceil(width_d) + 1; res = _gdContributionsAlloc(line_size, windows_size); if (res == NULL) { return NULL; } for (u = 0; u < 
line_size; u++) { const double dCenter = (double)u / scale_d; /* get the significant edge points affecting the pixel */ register int iLeft = MAX(0, (int)floor (dCenter - width_d)); int iRight = MIN((int)ceil(dCenter + width_d), (int)src_size - 1); double dTotalWeight = 0.0; int iSrc; /* Cut edge points to fit in filter window in case of spill-off */ if (iRight - iLeft + 1 > windows_size) { if (iLeft < ((int)src_size - 1 / 2)) { iLeft++; } else { iRight--; } } res->ContribRow[u].Left = iLeft; res->ContribRow[u].Right = iRight; for (iSrc = iLeft; iSrc <= iRight; iSrc++) { dTotalWeight += (res->ContribRow[u].Weights[iSrc-iLeft] = scale_f_d * (*pFilter)(scale_f_d * (dCenter - (double)iSrc))); } if (dTotalWeight < 0.0) { _gdContributionsFree(res); return NULL; } if (dTotalWeight > 0.0) { for (iSrc = iLeft; iSrc <= iRight; iSrc++) { res->ContribRow[u].Weights[iSrc-iLeft] /= dTotalWeight; } } } return res; } static inline void _gdScaleOneAxis(gdImagePtr pSrc, gdImagePtr dst, unsigned int dst_len, unsigned int row, LineContribType *contrib, gdAxis axis) { unsigned int ndx; for (ndx = 0; ndx < dst_len; ndx++) { double r = 0, g = 0, b = 0, a = 0; const int left = contrib->ContribRow[ndx].Left; const int right = contrib->ContribRow[ndx].Right; int *dest = (axis == HORIZONTAL) ? &dst->tpixels[row][ndx] : &dst->tpixels[ndx][row]; int i; /* Accumulate each channel */ for (i = left; i <= right; i++) { const int left_channel = i - left; const int srcpx = (axis == HORIZONTAL) ? 
pSrc->tpixels[row][i] : pSrc->tpixels[i][row]; r += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetRed(srcpx)); g += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetGreen(srcpx)); b += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetBlue(srcpx)); a += contrib->ContribRow[ndx].Weights[left_channel] * (double)(gdTrueColorGetAlpha(srcpx)); }/* for */ *dest = gdTrueColorAlpha(uchar_clamp(r, 0xFF), uchar_clamp(g, 0xFF), uchar_clamp(b, 0xFF), uchar_clamp(a, 0x7F)); /* alpha is 0..127 */ }/* for */ }/* _gdScaleOneAxis*/ static inline int _gdScalePass(const gdImagePtr pSrc, const unsigned int src_len, const gdImagePtr pDst, const unsigned int dst_len, const unsigned int num_lines, const gdAxis axis) { unsigned int line_ndx; LineContribType * contrib; /* Same dim, just copy it. */ assert(dst_len != src_len); // TODO: caller should handle this. contrib = _gdContributionsCalc(dst_len, src_len, (double)dst_len / (double)src_len, pSrc->interpolation); if (contrib == NULL) { return 0; } /* Scale each line */ for (line_ndx = 0; line_ndx < num_lines; line_ndx++) { _gdScaleOneAxis(pSrc, pDst, dst_len, line_ndx, contrib, axis); } _gdContributionsFree (contrib); return 1; }/* _gdScalePass*/ static gdImagePtr gdImageScaleTwoPass(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { const unsigned int src_width = src->sx; const unsigned int src_height = src->sy; gdImagePtr tmp_im = NULL; gdImagePtr dst = NULL; int scale_pass_res; assert(src != NULL); /* First, handle the trivial case. */ if (src_width == new_width && src_height == new_height) { return gdImageClone(src); }/* if */ /* Convert to truecolor if it isn't; this code requires it. */ if (!src->trueColor) { gdImagePaletteToTrueColor(src); }/* if */ /* Scale horizontally unless sizes are the same. 
*/ if (src_width == new_width) { tmp_im = src; } else { tmp_im = gdImageCreateTrueColor(new_width, src_height); if (tmp_im == NULL) { return NULL; } gdImageSetInterpolationMethod(tmp_im, src->interpolation_id); scale_pass_res = _gdScalePass(src, src_width, tmp_im, new_width, src_height, HORIZONTAL); if (scale_pass_res != 1) { gdImageDestroy(tmp_im); return NULL; } }/* if .. else*/ /* If vertical sizes match, we're done. */ if (src_height == new_height) { assert(tmp_im != src); return tmp_im; }/* if */ /* Otherwise, we need to scale vertically. */ dst = gdImageCreateTrueColor(new_width, new_height); if (dst != NULL) { gdImageSetInterpolationMethod(dst, src->interpolation_id); scale_pass_res = _gdScalePass(tmp_im, src_height, dst, new_height, new_width, VERTICAL); if (scale_pass_res != 1) { gdImageDestroy(dst); if (src != tmp_im) { gdImageDestroy(tmp_im); } return NULL; } }/* if */ if (src != tmp_im) { gdImageDestroy(tmp_im); }/* if */ return dst; }/* gdImageScaleTwoPass*/ /* BilinearFixed, BicubicFixed and nearest implementations are rewamped versions of the implementation in CBitmapEx http://www.codeproject.com/Articles/29121/CBitmapEx-Free-C-Bitmap-Manipulation-Class Integer only implementation, good to have for common usages like pre scale very large images before using another interpolation methods for the last step. 
*/ static gdImagePtr gdImageScaleNearestNeighbour(gdImagePtr im, const unsigned int width, const unsigned int height) { const unsigned long new_width = MAX(1, width); const unsigned long new_height = MAX(1, height); const float dx = (float)im->sx / (float)new_width; const float dy = (float)im->sy / (float)new_height; const gdFixed f_dx = gd_ftofx(dx); const gdFixed f_dy = gd_ftofx(dy); gdImagePtr dst_img; unsigned long dst_offset_x; unsigned long dst_offset_y = 0; unsigned int i; dst_img = gdImageCreateTrueColor(new_width, new_height); if (dst_img == NULL) { return NULL; } for (i=0; i<new_height; i++) { unsigned int j; dst_offset_x = 0; if (im->trueColor) { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = im->tpixels[m][n]; } } else { for (j=0; j<new_width; j++) { const gdFixed f_i = gd_itofx(i); const gdFixed f_j = gd_itofx(j); const gdFixed f_a = gd_mulfx(f_i, f_dy); const gdFixed f_b = gd_mulfx(f_j, f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); dst_img->tpixels[dst_offset_y][dst_offset_x++] = colorIndex2RGBA(im->pixels[m][n]); } } dst_offset_y++; } return dst_img; } #if 0 static inline int getPixelOverflowColorTC(gdImagePtr im, const int x, const int y, const int color) { if (gdImageBoundsSafe(im, x, y)) { const int c = im->tpixels[y][x]; if (c == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } return c; } else { register int border = 0; if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y < im->cy1) { border = im->tpixels[0][im->cx1]; goto processborder; } if (y > im->cy2) { if (x >= im->cx1 && x <= im->cx1) { border = im->tpixels[im->cy2][x]; goto processborder; } else { return gdTrueColorAlpha(0, 0, 0, 127); } } /* y is bound safe at this point */ if (x < im->cx1) { 
border = im->tpixels[y][im->cx1]; goto processborder; } if (x > im->cx2) { border = im->tpixels[y][im->cx2]; } processborder: if (border == im->transparent) { return gdTrueColorAlpha(0, 0, 0, 127); } else{ return gdTrueColorAlpha(gdTrueColorGetRed(border), gdTrueColorGetGreen(border), gdTrueColorGetBlue(border), 127); } } } #endif static gdImagePtr gdImageScaleBilinearPalette(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long _width = MAX(1, new_width); long _height = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)_width; float dy = (float)gdImageSY(im) / (float)_height; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int dst_offset_v = 0; long i; gdImagePtr new_img; const int transparent = im->transparent; new_img = gdImageCreateTrueColor(new_width, new_height); if (new_img == NULL) { return NULL; } if (transparent < 0) { /* uninitialized */ new_img->transparent = -1; } else { new_img->transparent = gdTrueColorAlpha(im->red[transparent], im->green[transparent], im->blue[transparent], im->alpha[transparent]); } for (i=0; i < _height; i++) { long j; const gdFixed f_i = gd_itofx(i); const gdFixed f_a = gd_mulfx(f_i, f_dy); register long m = gd_fxtoi(f_a); dst_offset_h = 0; for (j=0; j < _width; j++) { /* Update bitmap */ gdFixed f_j = gd_itofx(j); gdFixed f_b = gd_mulfx(f_j, f_dx); const long n = gd_fxtoi(f_b); gdFixed f_f = f_a - gd_itofx(m); gdFixed f_g = f_b - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); unsigned int pixel1; unsigned int pixel2; unsigned int pixel3; unsigned int pixel4; register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4; /* 0 for bgColor; (n,m) is supposed to be valid anyway */ pixel1 = getPixelOverflowPalette(im, n, m, 0); pixel2 = 
getPixelOverflowPalette(im, n + 1, m, pixel1); pixel3 = getPixelOverflowPalette(im, n, m + 1, pixel1); pixel4 = getPixelOverflowPalette(im, n + 1, m + 1, pixel1); f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); { const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4)); const unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4)); const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4)); const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4)); new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha); } dst_offset_h++; } dst_offset_v++; } return new_img; } static gdImagePtr gdImageScaleBilinearTC(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { long dst_w = MAX(1, new_width); long dst_h = MAX(1, new_height); float dx = (float)gdImageSX(im) / (float)dst_w; float dy = (float)gdImageSY(im) / (float)dst_h; gdFixed f_dx = gd_ftofx(dx); gdFixed f_dy = gd_ftofx(dy); gdFixed f_1 = gd_itofx(1); int dst_offset_h; int 
dst_offset_v = 0; long i; gdImagePtr new_img; new_img = gdImageCreateTrueColor(new_width, new_height); if (!new_img){ return NULL; } for (i=0; i < dst_h; i++) { long j; dst_offset_h = 0; for (j=0; j < dst_w; j++) { /* Update bitmap */ gdFixed f_i = gd_itofx(i); gdFixed f_j = gd_itofx(j); gdFixed f_a = gd_mulfx(f_i, f_dy); gdFixed f_b = gd_mulfx(f_j, f_dx); const gdFixed m = gd_fxtoi(f_a); const gdFixed n = gd_fxtoi(f_b); gdFixed f_f = f_a - gd_itofx(m); gdFixed f_g = f_b - gd_itofx(n); const gdFixed f_w1 = gd_mulfx(f_1-f_f, f_1-f_g); const gdFixed f_w2 = gd_mulfx(f_1-f_f, f_g); const gdFixed f_w3 = gd_mulfx(f_f, f_1-f_g); const gdFixed f_w4 = gd_mulfx(f_f, f_g); unsigned int pixel1; unsigned int pixel2; unsigned int pixel3; unsigned int pixel4; register gdFixed f_r1, f_r2, f_r3, f_r4, f_g1, f_g2, f_g3, f_g4, f_b1, f_b2, f_b3, f_b4, f_a1, f_a2, f_a3, f_a4; /* 0 for bgColor; (n,m) is supposed to be valid anyway */ pixel1 = getPixelOverflowTC(im, n, m, 0); pixel2 = getPixelOverflowTC(im, n + 1, m, pixel1); pixel3 = getPixelOverflowTC(im, n, m + 1, pixel1); pixel4 = getPixelOverflowTC(im, n + 1, m + 1, pixel1); f_r1 = gd_itofx(gdTrueColorGetRed(pixel1)); f_r2 = gd_itofx(gdTrueColorGetRed(pixel2)); f_r3 = gd_itofx(gdTrueColorGetRed(pixel3)); f_r4 = gd_itofx(gdTrueColorGetRed(pixel4)); f_g1 = gd_itofx(gdTrueColorGetGreen(pixel1)); f_g2 = gd_itofx(gdTrueColorGetGreen(pixel2)); f_g3 = gd_itofx(gdTrueColorGetGreen(pixel3)); f_g4 = gd_itofx(gdTrueColorGetGreen(pixel4)); f_b1 = gd_itofx(gdTrueColorGetBlue(pixel1)); f_b2 = gd_itofx(gdTrueColorGetBlue(pixel2)); f_b3 = gd_itofx(gdTrueColorGetBlue(pixel3)); f_b4 = gd_itofx(gdTrueColorGetBlue(pixel4)); f_a1 = gd_itofx(gdTrueColorGetAlpha(pixel1)); f_a2 = gd_itofx(gdTrueColorGetAlpha(pixel2)); f_a3 = gd_itofx(gdTrueColorGetAlpha(pixel3)); f_a4 = gd_itofx(gdTrueColorGetAlpha(pixel4)); { const unsigned char red = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_r1) + gd_mulfx(f_w2, f_r2) + gd_mulfx(f_w3, f_r3) + gd_mulfx(f_w4, f_r4)); const 
unsigned char green = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_g1) + gd_mulfx(f_w2, f_g2) + gd_mulfx(f_w3, f_g3) + gd_mulfx(f_w4, f_g4)); const unsigned char blue = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_b1) + gd_mulfx(f_w2, f_b2) + gd_mulfx(f_w3, f_b3) + gd_mulfx(f_w4, f_b4)); const unsigned char alpha = (unsigned char) gd_fxtoi(gd_mulfx(f_w1, f_a1) + gd_mulfx(f_w2, f_a2) + gd_mulfx(f_w3, f_a3) + gd_mulfx(f_w4, f_a4)); new_img->tpixels[dst_offset_v][dst_offset_h] = gdTrueColorAlpha(red, green, blue, alpha); } dst_offset_h++; } dst_offset_v++; } return new_img; } static gdImagePtr gdImageScaleBilinear(gdImagePtr im, const unsigned int new_width, const unsigned int new_height) { if (im->trueColor) { return gdImageScaleBilinearTC(im, new_width, new_height); } else { return gdImageScaleBilinearPalette(im, new_width, new_height); } } static gdImagePtr gdImageScaleBicubicFixed(gdImagePtr src, const unsigned int width, const unsigned int height) { const long new_width = MAX(1, width); const long new_height = MAX(1, height); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_dx = gd_ftofx((float)src_w / (float)new_width); const gdFixed f_dy = gd_ftofx((float)src_h / (float)new_height); const gdFixed f_1 = gd_itofx(1); const gdFixed f_2 = gd_itofx(2); const gdFixed f_4 = gd_itofx(4); const gdFixed f_6 = gd_itofx(6); const gdFixed f_gamma = gd_ftofx(1.04f); gdImagePtr dst; unsigned int dst_offset_x; unsigned int dst_offset_y = 0; long i; /* impact perf a bit, but not that much. Implementation for palette images can be done at a later point. 
*/ if (src->trueColor == 0) { gdImagePaletteToTrueColor(src); } dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i=0; i < new_height; i++) { long j; dst_offset_x = 0; for (j=0; j < new_width; j++) { const gdFixed f_a = gd_mulfx(gd_itofx(i), f_dy); const gdFixed f_b = gd_mulfx(gd_itofx(j), f_dx); const long m = gd_fxtoi(f_a); const long n = gd_fxtoi(f_b); const gdFixed f_f = f_a - gd_itofx(m); const gdFixed f_g = f_b - gd_itofx(n); unsigned int src_offset_x[16], src_offset_y[16]; long k; register gdFixed f_red = 0, f_green = 0, f_blue = 0, f_alpha = 0; unsigned char red, green, blue, alpha = 0; int *dst_row = dst->tpixels[dst_offset_y]; if ((m < 1) || (n < 1)) { src_offset_x[0] = n; src_offset_y[0] = m; } else { src_offset_x[0] = n - 1; src_offset_y[0] = m; } src_offset_x[1] = n; src_offset_y[1] = m; if ((m < 1) || (n >= src_w - 1)) { src_offset_x[2] = n; src_offset_y[2] = m; } else { src_offset_x[2] = n + 1; src_offset_y[2] = m; } if ((m < 1) || (n >= src_w - 2)) { src_offset_x[3] = n; src_offset_y[3] = m; } else { src_offset_x[3] = n + 1 + 1; src_offset_y[3] = m; } if (n < 1) { src_offset_x[4] = n; src_offset_y[4] = m; } else { src_offset_x[4] = n - 1; src_offset_y[4] = m; } src_offset_x[5] = n; src_offset_y[5] = m; if (n >= src_w-1) { src_offset_x[6] = n; src_offset_y[6] = m; } else { src_offset_x[6] = n + 1; src_offset_y[6] = m; } if (n >= src_w - 2) { src_offset_x[7] = n; src_offset_y[7] = m; } else { src_offset_x[7] = n + 1 + 1; src_offset_y[7] = m; } if ((m >= src_h - 1) || (n < 1)) { src_offset_x[8] = n; src_offset_y[8] = m; } else { src_offset_x[8] = n - 1; src_offset_y[8] = m; } src_offset_x[9] = n; src_offset_y[9] = m; if ((m >= src_h-1) || (n >= src_w-1)) { src_offset_x[10] = n; src_offset_y[10] = m; } else { src_offset_x[10] = n + 1; src_offset_y[10] = m; } if ((m >= src_h - 1) || (n >= src_w - 2)) { src_offset_x[11] = n; src_offset_y[11] = m; } else { src_offset_x[11] = n + 1 + 1; 
src_offset_y[11] = m; } if ((m >= src_h - 2) || (n < 1)) { src_offset_x[12] = n; src_offset_y[12] = m; } else { src_offset_x[12] = n - 1; src_offset_y[12] = m; } if (!(m >= src_h - 2)) { src_offset_x[13] = n; src_offset_y[13] = m; } if ((m >= src_h - 2) || (n >= src_w - 1)) { src_offset_x[14] = n; src_offset_y[14] = m; } else { src_offset_x[14] = n + 1; src_offset_y[14] = m; } if ((m >= src_h - 2) || (n >= src_w - 2)) { src_offset_x[15] = n; src_offset_y[15] = m; } else { src_offset_x[15] = n + 1 + 1; src_offset_y[15] = m; } for (k = -1; k < 3; k++) { const gdFixed f = gd_itofx(k)-f_f; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; register gdFixed f_a = 0, f_b = 0, f_d = 0, f_c = 0; register gdFixed f_RY; int l; if (f_fp2 > 0) f_a = gd_mulfx(f_fp2, gd_mulfx(f_fp2,f_fp2)); if (f_fp1 > 0) f_b = gd_mulfx(f_fp1, gd_mulfx(f_fp1,f_fp1)); if (f > 0) f_c = gd_mulfx(f, gd_mulfx(f,f)); if (f_fm1 > 0) f_d = gd_mulfx(f_fm1, gd_mulfx(f_fm1,f_fm1)); f_RY = gd_divfx((f_a - gd_mulfx(f_4,f_b) + gd_mulfx(f_6,f_c) - gd_mulfx(f_4,f_d)),f_6); for (l = -1; l < 3; l++) { const gdFixed f = gd_itofx(l) - f_g; const gdFixed f_fm1 = f - f_1; const gdFixed f_fp1 = f + f_1; const gdFixed f_fp2 = f + f_2; register gdFixed f_a = 0, f_b = 0, f_c = 0, f_d = 0; register gdFixed f_RX, f_R, f_rs, f_gs, f_bs, f_ba; register int c; const int _k = ((k+1)*4) + (l+1); if (f_fp2 > 0) f_a = gd_mulfx(f_fp2,gd_mulfx(f_fp2,f_fp2)); if (f_fp1 > 0) f_b = gd_mulfx(f_fp1,gd_mulfx(f_fp1,f_fp1)); if (f > 0) f_c = gd_mulfx(f,gd_mulfx(f,f)); if (f_fm1 > 0) f_d = gd_mulfx(f_fm1,gd_mulfx(f_fm1,f_fm1)); f_RX = gd_divfx((f_a-gd_mulfx(f_4,f_b)+gd_mulfx(f_6,f_c)-gd_mulfx(f_4,f_d)),f_6); f_R = gd_mulfx(f_RY,f_RX); c = src->tpixels[*(src_offset_y + _k)][*(src_offset_x + _k)]; f_rs = gd_itofx(gdTrueColorGetRed(c)); f_gs = gd_itofx(gdTrueColorGetGreen(c)); f_bs = gd_itofx(gdTrueColorGetBlue(c)); f_ba = gd_itofx(gdTrueColorGetAlpha(c)); f_red += gd_mulfx(f_rs,f_R); f_green += 
gd_mulfx(f_gs,f_R); f_blue += gd_mulfx(f_bs,f_R); f_alpha += gd_mulfx(f_ba,f_R); } } red = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_red, f_gamma)), 0, 255); green = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_green, f_gamma)), 0, 255); blue = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_blue, f_gamma)), 0, 255); alpha = (unsigned char) CLAMP(gd_fxtoi(gd_mulfx(f_alpha, f_gamma)), 0, 127); *(dst_row + dst_offset_x) = gdTrueColorAlpha(red, green, blue, alpha); dst_offset_x++; } dst_offset_y++; } return dst; } /** * Function: gdImageScale * * Scale an image * * Creates a new image, scaled to the requested size using the current * <gdInterpolationMethod>. * * Note that GD_WEIGHTED4 is not yet supported by this function. * * Parameters: * src - The source image. * new_width - The new width. * new_height - The new height. * * Returns: * The scaled image on success, NULL on failure. * * See also: * - <gdImageCopyResized> * - <gdImageCopyResampled> */ BGD_DECLARE(gdImagePtr) gdImageScale(const gdImagePtr src, const unsigned int new_width, const unsigned int new_height) { gdImagePtr im_scaled = NULL; if (src == NULL || (uintmax_t)src->interpolation_id >= GD_METHOD_COUNT) { return NULL; } if (new_width == 0 || new_height == 0) { return NULL; } if (new_width == gdImageSX(src) && new_height == gdImageSY(src)) { return gdImageClone(src); } switch (src->interpolation_id) { /*Special cases, optimized implementations */ case GD_NEAREST_NEIGHBOUR: im_scaled = gdImageScaleNearestNeighbour(src, new_width, new_height); break; case GD_BILINEAR_FIXED: case GD_LINEAR: im_scaled = gdImageScaleBilinear(src, new_width, new_height); break; case GD_BICUBIC_FIXED: case GD_BICUBIC: im_scaled = gdImageScaleBicubicFixed(src, new_width, new_height); break; /* generic */ default: if (src->interpolation == NULL) { return NULL; } im_scaled = gdImageScaleTwoPass(src, new_width, new_height); break; } return im_scaled; } static int gdRotatedImageSize(gdImagePtr src, const float angle, gdRectPtr bbox) { 
gdRect src_area; double m[6]; gdAffineRotate(m, angle); src_area.x = 0; src_area.y = 0; src_area.width = gdImageSX(src); src_area.height = gdImageSY(src); if (gdTransformAffineBoundingBox(&src_area, m, bbox) != GD_TRUE) { return GD_FALSE; } return GD_TRUE; } static gdImagePtr gdImageRotateNearestNeighbour(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_0_5 = gd_ftofx(0.5f); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; gdRect bbox; int new_height, new_width; gdRotatedImageSize(src, degrees, &bbox); new_width = bbox.width; new_height = bbox.height; dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j = 0; j < new_width; j++) { gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_0_5 + f_H; gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_0_5 + f_W; long m = gd_fxtoi(f_m); long n = gd_fxtoi(f_n); if ((m > 0) && (m < src_h-1) && (n > 0) && (n < src_w-1)) { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = src->tpixels[m][n]; } } else { if (dst_offset_y < new_height) { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } } } dst_offset_y++; } return dst; } static gdImagePtr gdImageRotateGeneric(gdImagePtr src, const float degrees, const int bgColor) { float _angle = ((float) (-degrees / 180.0f) * (float)M_PI); const int src_w = gdImageSX(src); const int src_h = gdImageSY(src); const gdFixed f_H = gd_itofx(src_h/2); const gdFixed 
f_W = gd_itofx(src_w/2); const gdFixed f_cos = gd_ftofx(cos(-_angle)); const gdFixed f_sin = gd_ftofx(sin(-_angle)); unsigned int dst_offset_x; unsigned int dst_offset_y = 0; unsigned int i; gdImagePtr dst; int new_width, new_height; gdRect bbox; if (bgColor < 0) { return NULL; } if (src->interpolation == NULL) { gdImageSetInterpolationMethod(src, GD_DEFAULT); } gdRotatedImageSize(src, degrees, &bbox); new_width = bbox.width; new_height = bbox.height; dst = gdImageCreateTrueColor(new_width, new_height); if (!dst) { return NULL; } dst->saveAlphaFlag = 1; for (i = 0; i < new_height; i++) { unsigned int j; dst_offset_x = 0; for (j = 0; j < new_width; j++) { gdFixed f_i = gd_itofx((int)i - (int)new_height / 2); gdFixed f_j = gd_itofx((int)j - (int)new_width / 2); gdFixed f_m = gd_mulfx(f_j,f_sin) + gd_mulfx(f_i,f_cos) + f_H; gdFixed f_n = gd_mulfx(f_j,f_cos) - gd_mulfx(f_i,f_sin) + f_W; long m = gd_fxtoi(f_m); long n = gd_fxtoi(f_n); if (m < -1 || n < -1 || m >= src_h || n >= src_w ) { dst->tpixels[dst_offset_y][dst_offset_x++] = bgColor; } else { dst->tpixels[dst_offset_y][dst_offset_x++] = getPixelInterpolated(src, gd_fxtod(f_n), gd_fxtod(f_m), bgColor); } } dst_offset_y++; } return dst; } /** * Function: gdImageRotateInterpolated * * Rotate an image * * Creates a new image, counter-clockwise rotated by the requested angle * using the current <gdInterpolationMethod>. Non-square angles will add a * border with bgcolor. * * Parameters: * src - The source image. * angle - The angle in degrees. * bgcolor - The color to fill the added background with. * * Returns: * The rotated image on success, NULL on failure. * * See also: * - <gdImageCopyRotated> */ BGD_DECLARE(gdImagePtr) gdImageRotateInterpolated(const gdImagePtr src, const float angle, int bgcolor) { /* round to two decimals and keep the 100x multiplication to use it in the common square angles case later. Keep the two decimal precisions so smaller rotation steps can be done, useful for slow animations, f.e. 
*/
	const int angle_rounded = fmod((int) floorf(angle * 100), 360 * 100);

	if (src == NULL || bgcolor < 0) {
		return NULL;
	}

	/* impact perf a bit, but not that much. Implementation for palette
	   images can be done at a later point.  The palette bgcolor is resolved
	   to a truecolor value first, then the source is converted in place. */
	if (src->trueColor == 0) {
		if (bgcolor < gdMaxColors) {
			bgcolor = gdTrueColorAlpha(src->red[bgcolor], src->green[bgcolor], src->blue[bgcolor], src->alpha[bgcolor]);
		}
		gdImagePaletteToTrueColor(src);
	}

	/* 0 && 90 degrees multiple rotation, 0 rotation simply clones the
	   return image and convert it to truecolor, as we must return truecolor
	   image.  angle_rounded is in hundredths of a degree. */
	switch (angle_rounded) {
		case 0: {
			gdImagePtr dst = gdImageClone(src);

			if (dst == NULL) {
				return NULL;
			}
			if (dst->trueColor == 0) {
				gdImagePaletteToTrueColor(dst);
			}
			return dst;
		}

		case -27000:
		case   9000:
			return gdImageRotate90(src, 0);

		case -18000:
		case  18000:
			return gdImageRotate180(src, 0);

		case  -9000:
		case  27000:
			return gdImageRotate270(src, 0);
	}

	/* Any other angle requires a valid interpolation method. */
	if (src->interpolation_id < 1 || src->interpolation_id > GD_METHOD_COUNT) {
		return NULL;
	}

	switch (src->interpolation_id) {
		case GD_NEAREST_NEIGHBOUR:
			return gdImageRotateNearestNeighbour(src, angle, bgcolor);
			break;

		case GD_BILINEAR_FIXED:
		case GD_BICUBIC_FIXED:
		default:
			return gdImageRotateGeneric(src, angle, bgcolor);
	}
	return NULL;
}

/**
 * Group: Affine Transformation
 **/

/* Clamps *r, in place, to the clipping region of im. */
static void gdImageClipRectangle(gdImagePtr im, gdRectPtr r)
{
	int c1x, c1y, c2x, c2y;
	int x1,y1;

	gdImageGetClip(im, &c1x, &c1y, &c2x, &c2y);
	x1 = r->x + r->width - 1;
	y1 = r->y + r->height - 1;
	r->x = CLAMP(r->x, c1x, c2x);
	r->y = CLAMP(r->y, c1y, c2y);
	r->width = CLAMP(x1, c1x, c2x) - r->x + 1;
	r->height = CLAMP(y1, c1y, c2y) - r->y + 1;
}

/* Debug helper: prints msg followed by the rectangle's (x, y) and
   (width, height). */
void gdDumpRect(const char *msg, gdRectPtr r)
{
	printf("%s (%i, %i) (%i, %i)\n", msg, r->x, r->y, r->width, r->height);
}

/**
 * Function: gdTransformAffineGetImage
 *  Applies an affine transformation to a region and return an image
 *  containing the complete transformation.
*
 * Parameters:
 * 	dst - Pointer to a gdImagePtr to store the created image, NULL when
 *        the creation or the transformation failed
 * 	src - Source image
 * 	src_area - rectangle defining the source region to transform
 * 	dstY - Y position in the destination image
 * 	affine - The desired affine transformation
 *
 * Returns:
 *  GD_TRUE if the affine is rectilinear or GD_FALSE
 */
BGD_DECLARE(int) gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6])
{
	int res;
	double m[6];
	gdRect bbox;
	gdRect area_full;

	/* A NULL src_area means "transform the whole source image". */
	if (src_area == NULL) {
		area_full.x = 0;
		area_full.y = 0;
		area_full.width = gdImageSX(src);
		area_full.height = gdImageSY(src);
		src_area = &area_full;
	}

	gdTransformAffineBoundingBox(src_area, affine, &bbox);

	*dst = gdImageCreateTrueColor(bbox.width, bbox.height);
	if (*dst == NULL) {
		return GD_FALSE;
	}
	(*dst)->saveAlphaFlag = 1;

	/* Convert a palette source in place to truecolor before transforming. */
	if (!src->trueColor) {
		gdImagePaletteToTrueColor(src);
	}

	/* Translate to dst origin (0,0) */
	gdAffineTranslate(m, -bbox.x, -bbox.y);
	gdAffineConcat(m, affine, m);

	gdImageAlphaBlending(*dst, 0);

	res = gdTransformAffineCopy(*dst, 0,0, src, src_area, m);

	/* On failure, destroy the half-built destination and report it via
	   both the return value and *dst = NULL. */
	if (res != GD_TRUE) {
		gdImageDestroy(*dst);
		*dst = NULL;
		return GD_FALSE;
	} else {
		return GD_TRUE;
	}
}

/**
 * Function: gdTransformAffineCopy
 *  Applies an affine transformation to a region and copy the result
 *  in a destination to the given position.
* * Parameters: * dst - Image to draw the transformed image * src - Source image * dstX - X position in the destination image * dstY - Y position in the destination image * src_area - Rectangular region to rotate in the src image * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineCopy(gdImagePtr dst, int dst_x, int dst_y, const gdImagePtr src, gdRectPtr src_region, const double affine[6]) { int c1x,c1y,c2x,c2y; int backclip = 0; int backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2; register int x, y, src_offset_x, src_offset_y; double inv[6]; int *dst_p; gdPointF pt, src_pt; gdRect bbox; int end_x, end_y; gdInterpolationMethod interpolation_id_bak = GD_DEFAULT; /* These methods use special implementations */ if (src->interpolation_id == GD_BILINEAR_FIXED || src->interpolation_id == GD_BICUBIC_FIXED || src->interpolation_id == GD_NEAREST_NEIGHBOUR) { interpolation_id_bak = src->interpolation_id; gdImageSetInterpolationMethod(src, GD_BICUBIC); } gdImageClipRectangle(src, src_region); if (src_region->x > 0 || src_region->y > 0 || src_region->width < gdImageSX(src) || src_region->height < gdImageSY(src)) { backclip = 1; gdImageGetClip(src, &backup_clipx1, &backup_clipy1, &backup_clipx2, &backup_clipy2); gdImageSetClip(src, src_region->x, src_region->y, src_region->x + src_region->width - 1, src_region->y + src_region->height - 1); } if (!gdTransformAffineBoundingBox(src_region, affine, &bbox)) { if (backclip) { gdImageSetClip(src, backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2); } gdImageSetInterpolationMethod(src, interpolation_id_bak); return GD_FALSE; } gdImageGetClip(dst, &c1x, &c1y, &c2x, &c2y); end_x = bbox.width + abs(bbox.x); end_y = bbox.height + abs(bbox.y); /* Get inverse affine to let us work with destination -> source */ gdAffineInvert(inv, affine); src_offset_x = src_region->x; src_offset_y = src_region->y; if (dst->alphaBlendingFlag) { for (y = bbox.y; y <= end_y; y++) { pt.y = y + 
0.5; for (x = 0; x <= end_x; x++) { pt.x = x + 0.5; gdAffineApplyToPointF(&src_pt, &pt, inv); gdImageSetPixel(dst, dst_x + x, dst_y + y, getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, 0)); } } } else { for (y = 0; y <= end_y; y++) { pt.y = y + 0.5 + bbox.y; if ((dst_y + y) < 0 || ((dst_y + y) > gdImageSY(dst) -1)) { continue; } dst_p = dst->tpixels[dst_y + y] + dst_x; for (x = 0; x <= end_x; x++) { pt.x = x + 0.5 + bbox.x; gdAffineApplyToPointF(&src_pt, &pt, inv); if ((dst_x + x) < 0 || (dst_x + x) > (gdImageSX(dst) - 1)) { break; } *(dst_p++) = getPixelInterpolated(src, src_offset_x + src_pt.x, src_offset_y + src_pt.y, -1); } } } /* Restore clip if required */ if (backclip) { gdImageSetClip(src, backup_clipx1, backup_clipy1, backup_clipx2, backup_clipy2); } gdImageSetInterpolationMethod(src, interpolation_id_bak); return GD_TRUE; } /** * Function: gdTransformAffineBoundingBox * Returns the bounding box of an affine transformation applied to a * rectangular area <gdRect> * * Parameters: * src - Rectangular source area for the affine transformation * affine - the affine transformation * bbox - the resulting bounding box * * Returns: * GD_TRUE if the affine is rectilinear or GD_FALSE */ BGD_DECLARE(int) gdTransformAffineBoundingBox(gdRectPtr src, const double affine[6], gdRectPtr bbox) { gdPointF extent[4], min, max, point; int i; extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) src->width; extent[1].y=0.0; extent[2].x=(double) src->width; extent[2].y=(double) src->height; extent[3].x=0.0; extent[3].y=(double) src->height; for (i=0; i < 4; i++) { point=extent[i]; if (gdAffineApplyToPointF(&extent[i], &point, affine) != GD_TRUE) { return GD_FALSE; } } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } bbox->x = (int) min.x; bbox->y = (int) min.y; 
bbox->width = (int) ceil((max.x - min.x)) + 1; bbox->height = (int) ceil(max.y - min.y) + 1; return GD_TRUE; } /** * Group: Interpolation Method */ /** * Function: gdImageSetInterpolationMethod * * Set the interpolation method for subsequent operations * * Parameters: * im - The image. * id - The interpolation method. * * Returns: * Non-zero on success, zero on failure. * * See also: * - <gdInterpolationMethod> * - <gdImageGetInterpolationMethod> */ BGD_DECLARE(int) gdImageSetInterpolationMethod(gdImagePtr im, gdInterpolationMethod id) { if (im == NULL || (uintmax_t)id > GD_METHOD_COUNT) { return 0; } switch (id) { case GD_NEAREST_NEIGHBOUR: case GD_WEIGHTED4: im->interpolation = NULL; break; /* generic versions*/ /* GD_BILINEAR_FIXED and GD_BICUBIC_FIXED are kept for BC reasons */ case GD_BILINEAR_FIXED: case GD_LINEAR: im->interpolation = filter_linear; break; case GD_BELL: im->interpolation = filter_bell; break; case GD_BESSEL: im->interpolation = filter_bessel; break; case GD_BICUBIC_FIXED: case GD_BICUBIC: im->interpolation = filter_bicubic; break; case GD_BLACKMAN: im->interpolation = filter_blackman; break; case GD_BOX: im->interpolation = filter_box; break; case GD_BSPLINE: im->interpolation = filter_bspline; break; case GD_CATMULLROM: im->interpolation = filter_catmullrom; break; case GD_GAUSSIAN: im->interpolation = filter_gaussian; break; case GD_GENERALIZED_CUBIC: im->interpolation = filter_generalized_cubic; break; case GD_HERMITE: im->interpolation = filter_hermite; break; case GD_HAMMING: im->interpolation = filter_hamming; break; case GD_HANNING: im->interpolation = filter_hanning; break; case GD_MITCHELL: im->interpolation = filter_mitchell; break; case GD_POWER: im->interpolation = filter_power; break; case GD_QUADRATIC: im->interpolation = filter_quadratic; break; case GD_SINC: im->interpolation = filter_sinc; break; case GD_TRIANGLE: im->interpolation = filter_triangle; break; case GD_DEFAULT: id = GD_LINEAR; im->interpolation = filter_linear; 
default: return 0; break; } im->interpolation_id = id; return 1; } /** * Function: gdImageGetInterpolationMethod * * Get the current interpolation method * * This is here so that the value can be read via a language or VM with an FFI * but no (portable) way to extract the value from the struct. * * Parameters: * im - The image. * * Returns: * The current interpolation method. * * See also: * - <gdInterpolationMethod> * - <gdImageSetInterpolationMethod> */ BGD_DECLARE(gdInterpolationMethod) gdImageGetInterpolationMethod(gdImagePtr im) { return im->interpolation_id; } #ifdef _MSC_VER # pragma optimize("", on) #endif
./CrossVul/dataset_final_sorted/CWE-191/c/good_4828_0
crossvul-cpp_data_good_2968_0
// This may look like C code, but it is really -*- C++ -*- // // Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2003 // Copyright Dirk Lemstra 2013-2017 // // Implementation of Image // #define MAGICKCORE_IMPLEMENTATION 1 #define MAGICK_PLUSPLUS_IMPLEMENTATION 1 #include "Magick++/Include.h" #include <cstdlib> #include <string> #include <string.h> #include <errno.h> #include <math.h> using namespace std; #include "Magick++/Image.h" #include "Magick++/Functions.h" #include "Magick++/Pixels.h" #include "Magick++/Options.h" #include "Magick++/ImageRef.h" #define AbsoluteValue(x) ((x) < 0 ? -(x) : (x)) #define MagickPI 3.14159265358979323846264338327950288419716939937510 #define DegreesToRadians(x) (MagickPI*(x)/180.0) #define ThrowImageException ThrowPPException(quiet()) MagickPPExport const char *Magick::borderGeometryDefault="6x6+0+0"; MagickPPExport const char *Magick::frameGeometryDefault="25x25+6+6"; MagickPPExport const char *Magick::raiseGeometryDefault="6x6+0+0"; MagickPPExport int Magick::operator == (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels and signature are the same, then the image is identical return((left_.rows() == right_.rows()) && (left_.columns() == right_.columns()) && (left_.signature() == right_.signature())); } MagickPPExport int Magick::operator != (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ == right_)); } MagickPPExport int Magick::operator > (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ < right_) && (left_ != right_)); } MagickPPExport int Magick::operator < (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels are less, then image is smaller return((left_.rows() * left_.columns()) < (right_.rows() * right_.columns())); } MagickPPExport int Magick::operator >= (const Magick::Image &left_, const Magick::Image &right_) { return((left_ > right_) || (left_ == right_)); } MagickPPExport int Magick::operator <= (const 
Magick::Image &left_, const Magick::Image &right_) { return((left_ < right_) || ( left_ == right_)); } Magick::Image::Image(void) : _imgRef(new ImageRef) { } Magick::Image::Image(const Blob &blob_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(blob_); quiet(false); } catch (const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_, size_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Geometry &size_,const Color &color_) : _imgRef(new ImageRef) { // xc: prefix specifies an X11 color string std::string imageSpec("xc:"); imageSpec+=color_; try { quiet(true); // Set image size size(size_); // Initialize, Allocate and Read images read(imageSpec); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Image &image_) : _imgRef(image_._imgRef) { _imgRef->increase(); } Magick::Image::Image(const Image &image_,const Geometry &geometry_) : _imgRef(new ImageRef) { const 
RectangleInfo geometry=geometry_; OffsetInfo offset; MagickCore::Image *image; GetPPException; image=CloneImage(image_.constImage(),geometry_.width(),geometry_.height(), MagickTrue,exceptionInfo); replaceImage(image); _imgRef->options(new Options(*image_.constOptions())); offset.x=0; offset.y=0; (void) CopyImagePixels(image,image_.constImage(),&geometry,&offset, exceptionInfo); ThrowImageException; } Magick::Image::Image(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) : _imgRef(new ImageRef) { try { quiet(true); read(width_,height_,map_.c_str(),type_,pixels_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const std::string &imageSpec_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(imageSpec_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::~Image() { try { if (_imgRef->decrease() == 0) delete _imgRef; } catch(Magick::Exception) { } _imgRef=(Magick::ImageRef *) NULL; } Magick::Image& Magick::Image::operator=(const Magick::Image &image_) { if (this != &image_) { image_._imgRef->increase(); if (_imgRef->decrease() == 0) delete _imgRef; // Use new image reference _imgRef=image_._imgRef; } return(*this); } void Magick::Image::adjoin(const bool flag_) { modifyImage(); options()->adjoin(flag_); } bool Magick::Image::adjoin(void) const { return(constOptions()->adjoin()); } void Magick::Image::alpha(const bool alphaFlag_) { modifyImage(); // If matte channel is requested, but image doesn't already have a // matte channel, then create an opaque matte channel. Likewise, if // the image already has a matte channel but a matte channel is not // desired, then set the matte channel to opaque. 
GetPPException; if ((alphaFlag_ && !constImage()->alpha_trait) || (constImage()->alpha_trait && !alphaFlag_)) SetImageAlpha(image(),OpaqueAlpha,exceptionInfo); ThrowImageException; image()->alpha_trait=alphaFlag_ ? BlendPixelTrait : UndefinedPixelTrait; } bool Magick::Image::alpha(void) const { if (constImage()->alpha_trait == BlendPixelTrait) return(true); else return(false); } void Magick::Image::matteColor(const Color &matteColor_) { modifyImage(); if (matteColor_.isValid()) { image()->matte_color=matteColor_; options()->matteColor(matteColor_); } else { // Set to default matte color Color tmpColor("#BDBDBD"); image()->matte_color=tmpColor; options()->matteColor(tmpColor); } } Magick::Color Magick::Image::matteColor(void) const { return(Color(constImage()->matte_color)); } void Magick::Image::animationDelay(const size_t delay_) { modifyImage(); image()->delay=delay_; } size_t Magick::Image::animationDelay(void) const { return(constImage()->delay); } void Magick::Image::animationIterations(const size_t iterations_) { modifyImage(); image()->iterations=iterations_; } size_t Magick::Image::animationIterations(void) const { return(constImage()->iterations); } void Magick::Image::attenuate(const double attenuate_) { char value[MagickPathExtent]; modifyImage(); FormatLocaleString(value,MagickPathExtent,"%.20g",attenuate_); (void) SetImageArtifact(image(),"attenuate",value); } void Magick::Image::backgroundColor(const Color &backgroundColor_) { modifyImage(); if (backgroundColor_.isValid()) image()->background_color=backgroundColor_; else image()->background_color=Color(); options()->backgroundColor(backgroundColor_); } Magick::Color Magick::Image::backgroundColor(void) const { return(constOptions()->backgroundColor()); } void Magick::Image::backgroundTexture(const std::string &backgroundTexture_) { modifyImage(); options()->backgroundTexture(backgroundTexture_); } std::string Magick::Image::backgroundTexture(void) const { return(constOptions()->backgroundTexture()); } 
size_t Magick::Image::baseColumns(void) const { return(constImage()->magick_columns); } std::string Magick::Image::baseFilename(void) const { return(std::string(constImage()->magick_filename)); } size_t Magick::Image::baseRows(void) const { return(constImage()->magick_rows); } void Magick::Image::blackPointCompensation(const bool flag_) { image()->black_point_compensation=(MagickBooleanType) flag_; } bool Magick::Image::blackPointCompensation(void) const { return(static_cast<bool>(constImage()->black_point_compensation)); } void Magick::Image::borderColor(const Color &borderColor_) { modifyImage(); if (borderColor_.isValid()) image()->border_color=borderColor_; else image()->border_color=Color(); options()->borderColor(borderColor_); } Magick::Color Magick::Image::borderColor(void) const { return(constOptions()->borderColor()); } Magick::Geometry Magick::Image::boundingBox(void) const { RectangleInfo bbox; GetPPException; bbox=GetImageBoundingBox(constImage(),exceptionInfo); ThrowImageException; return(Geometry(bbox)); } void Magick::Image::boxColor(const Color &boxColor_) { modifyImage(); options()->boxColor(boxColor_); } Magick::Color Magick::Image::boxColor(void) const { return(constOptions()->boxColor()); } void Magick::Image::channelDepth(const ChannelType channel_, const size_t depth_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); SetImageDepth(image(),depth_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } size_t Magick::Image::channelDepth(const ChannelType channel_) { size_t channel_depth; GetPPException; GetAndSetPPChannelMask(channel_); channel_depth=GetImageDepth(constImage(),exceptionInfo); RestorePPChannelMask; ThrowImageException; return(channel_depth); } size_t Magick::Image::channels() const { return(constImage()->number_channels); } void Magick::Image::classType(const ClassType class_) { if (classType() == PseudoClass && class_ == DirectClass) { // Use SyncImage to synchronize the DirectClass pixels with the // 
color map and then set to DirectClass type. modifyImage(); GetPPException; SyncImage(image(),exceptionInfo); ThrowImageException; image()->colormap=(PixelInfo *)RelinquishMagickMemory(image()->colormap); image()->storage_class=static_cast<MagickCore::ClassType>(DirectClass); return; } if (classType() == DirectClass && class_ == PseudoClass) { // Quantize to create PseudoClass color map modifyImage(); quantizeColors(MaxColormapSize); quantize(); image()->storage_class=static_cast<MagickCore::ClassType>(PseudoClass); } } Magick::ClassType Magick::Image::classType(void) const { return static_cast<Magick::ClassType>(constImage()->storage_class); } void Magick::Image::colorFuzz(const double fuzz_) { modifyImage(); image()->fuzz=fuzz_; options()->colorFuzz(fuzz_); } double Magick::Image::colorFuzz(void) const { return(constOptions()->colorFuzz()); } void Magick::Image::colorMapSize(const size_t entries_) { if (entries_ >MaxColormapSize) throwExceptionExplicit(MagickCore::OptionError, "Colormap entries must not exceed MaxColormapSize"); modifyImage(); GetPPException; (void) AcquireImageColormap(image(),entries_,exceptionInfo); ThrowImageException; } size_t Magick::Image::colorMapSize(void) const { if (!constImage()->colormap) throwExceptionExplicit(MagickCore::OptionError, "Image does not contain a colormap"); return(constImage()->colors); } void Magick::Image::colorSpace(const ColorspaceType colorSpace_) { if (image()->colorspace == colorSpace_) return; modifyImage(); GetPPException; TransformImageColorspace(image(),colorSpace_,exceptionInfo); ThrowImageException; } Magick::ColorspaceType Magick::Image::colorSpace(void) const { return (constImage()->colorspace); } void Magick::Image::colorSpaceType(const ColorspaceType colorSpace_) { modifyImage(); GetPPException; SetImageColorspace(image(),colorSpace_,exceptionInfo); ThrowImageException; options()->colorspaceType(colorSpace_); } Magick::ColorspaceType Magick::Image::colorSpaceType(void) const { 
return(constOptions()->colorspaceType()); } size_t Magick::Image::columns(void) const { return(constImage()->columns); } void Magick::Image::comment(const std::string &comment_) { modifyImage(); GetPPException; SetImageProperty(image(),"Comment",NULL,exceptionInfo); if (comment_.length() > 0) SetImageProperty(image(),"Comment",comment_.c_str(),exceptionInfo); ThrowImageException; } std::string Magick::Image::comment(void) const { const char *value; GetPPException; value=GetImageProperty(constImage(),"Comment",exceptionInfo); ThrowImageException; if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::compose(const CompositeOperator compose_) { image()->compose=compose_; } Magick::CompositeOperator Magick::Image::compose(void) const { return(constImage()->compose); } void Magick::Image::compressType(const CompressionType compressType_) { modifyImage(); image()->compression=compressType_; options()->compressType(compressType_); } Magick::CompressionType Magick::Image::compressType(void) const { return(constImage()->compression); } void Magick::Image::debug(const bool flag_) { modifyImage(); options()->debug(flag_); } bool Magick::Image::debug(void) const { return(constOptions()->debug()); } void Magick::Image::density(const Point &density_) { modifyImage(); options()->density(density_); if (density_.isValid()) { image()->resolution.x=density_.x(); if (density_.y() != 0.0) image()->resolution.y=density_.y(); else image()->resolution.y=density_.x(); } else { // Reset to default image()->resolution.x=0.0; image()->resolution.y=0.0; } } Magick::Point Magick::Image::density(void) const { if (isValid()) { ssize_t x_resolution=72, y_resolution=72; if (constImage()->resolution.x > 0.0) x_resolution=constImage()->resolution.x; if (constImage()->resolution.y > 0.0) y_resolution=constImage()->resolution.y; return(Point(x_resolution,y_resolution)); } return(constOptions()->density()); } void Magick::Image::depth(const 
size_t depth_) { size_t depth = depth_; if (depth > MAGICKCORE_QUANTUM_DEPTH) depth=MAGICKCORE_QUANTUM_DEPTH; modifyImage(); image()->depth=depth; options()->depth(depth); } size_t Magick::Image::depth(void) const { return(constImage()->depth); } std::string Magick::Image::directory(void) const { if (constImage()->directory) return(std::string(constImage()->directory)); if (!quiet()) throwExceptionExplicit(MagickCore::CorruptImageWarning, "Image does not contain a directory"); return(std::string()); } void Magick::Image::endian(const Magick::EndianType endian_) { modifyImage(); options()->endian(endian_); image()->endian=endian_; } Magick::EndianType Magick::Image::endian(void) const { return(constImage()->endian); } void Magick::Image::exifProfile(const Magick::Blob &exifProfile_) { modifyImage(); if (exifProfile_.data() != 0) { StringInfo *exif_profile; exif_profile=AcquireStringInfo(exifProfile_.length()); SetStringInfoDatum(exif_profile,(unsigned char *) exifProfile_.data()); GetPPException; (void) SetImageProfile(image(),"exif",exif_profile,exceptionInfo); exif_profile=DestroyStringInfo(exif_profile); ThrowImageException; } } Magick::Blob Magick::Image::exifProfile(void) const { const StringInfo *exif_profile; exif_profile=GetImageProfile(constImage(),"exif"); if (exif_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(exif_profile), GetStringInfoLength(exif_profile))); } void Magick::Image::fileName(const std::string &fileName_) { modifyImage(); fileName_.copy(image()->filename,sizeof(image()->filename)-1); image()->filename[fileName_.length()]=0; // Null terminate options()->fileName(fileName_); } std::string Magick::Image::fileName(void) const { return(constOptions()->fileName()); } MagickCore::MagickSizeType Magick::Image::fileSize(void) const { return(GetBlobSize(constImage())); } void Magick::Image::fillColor(const Magick::Color &fillColor_) { modifyImage(); options()->fillColor(fillColor_); } Magick::Color 
Magick::Image::fillColor(void) const { return(constOptions()->fillColor()); } void Magick::Image::fillRule(const Magick::FillRule &fillRule_) { modifyImage(); options()->fillRule(fillRule_); } Magick::FillRule Magick::Image::fillRule(void) const { return constOptions()->fillRule(); } void Magick::Image::fillPattern(const Image &fillPattern_) { modifyImage(); if (fillPattern_.isValid()) options()->fillPattern(fillPattern_.constImage()); else options()->fillPattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::fillPattern(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->fillPattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::filterType(const Magick::FilterType filterType_) { modifyImage(); image()->filter=filterType_; } Magick::FilterType Magick::Image::filterType(void) const { return(constImage()->filter); } void Magick::Image::font(const std::string &font_) { modifyImage(); options()->font(font_); } std::string Magick::Image::font(void) const { return(constOptions()->font()); } void Magick::Image::fontFamily(const std::string &family_) { modifyImage(); options()->fontFamily(family_); } std::string Magick::Image::fontFamily(void) const { return(constOptions()->fontFamily()); } void Magick::Image::fontPointsize(const double pointSize_) { modifyImage(); options()->fontPointsize(pointSize_); } double Magick::Image::fontPointsize(void) const { return(constOptions()->fontPointsize()); } void Magick::Image::fontStyle(const StyleType pointSize_) { modifyImage(); options()->fontStyle(pointSize_); } Magick::StyleType Magick::Image::fontStyle(void) const { return(constOptions()->fontStyle()); } void Magick::Image::fontWeight(const size_t weight_) { modifyImage(); options()->fontWeight(weight_); } 
// Font weight used when annotating with text.
size_t Magick::Image::fontWeight(void) const
{
  return(constOptions()->fontWeight());
}

// Long-form description of the image's format (looked up from the coder
// registry via the image's magick string).  Emits a CorruptImageWarning
// (unless quiet) when the magick type is unrecognized.
std::string Magick::Image::format(void) const
{
  const MagickInfo
    *magick_info;

  GetPPException;
  magick_info=GetMagickInfo(constImage()->magick,exceptionInfo);
  ThrowImageException;

  if ((magick_info != 0) && (*magick_info->description != '\0'))
    return(std::string(magick_info->description));

  if (!quiet())
    throwExceptionExplicit(MagickCore::CorruptImageWarning,
      "Unrecognized image magick type");

  return(std::string());
}

// Expand a percent-escape property expression (e.g. "%wx%h") against this
// image and return the interpreted text.  Returns an empty string when the
// expression yields no text.
std::string Magick::Image::formatExpression(const std::string expression)
{
  char
    *text;

  std::string
    text_string;

  GetPPException;
  modifyImage();
  text=InterpretImageProperties(imageInfo(),image(),expression.c_str(),
    exceptionInfo);
  if (text != (char *) NULL)
    {
      // InterpretImageProperties transfers ownership; copy then release.
      text_string=std::string(text);
      text=DestroyString(text);
    }
  ThrowImageException;
  return(text_string);
}

// Gamma level of the image.
double Magick::Image::gamma(void) const
{
  return(constImage()->gamma);
}

// Preferred size of the image when encoding; warns (unless quiet) when the
// image carries no geometry string.
Magick::Geometry Magick::Image::geometry(void) const
{
  if (constImage()->geometry)
    return Geometry(constImage()->geometry);

  if (!quiet())
    throwExceptionExplicit(MagickCore::OptionWarning,
      "Image does not contain a geometry");

  return(Geometry());
}

// GIF disposal method (how this frame is replaced in an animation).
void Magick::Image::gifDisposeMethod(
  const MagickCore::DisposeType disposeMethod_)
{
  modifyImage();
  image()->dispose=disposeMethod_;
}

MagickCore::DisposeType Magick::Image::gifDisposeMethod(void) const
{
  return(constImage()->dispose);
}

// Returns true when the image defines the given pixel channel.  For green
// and blue the channel offset must also match the canonical position, i.e.
// the pixels must not have been remapped.
bool Magick::Image::hasChannel(const PixelChannel channel) const
{
  if (GetPixelChannelTraits(constImage(),channel) == UndefinedPixelTrait)
    return(false);

  if (channel == GreenPixelChannel || channel == BluePixelChannel)
    return (GetPixelChannelOffset(constImage(),channel) == (ssize_t)channel);

  return(true);
}

// Color used to highlight differences in compare operations (stored as the
// "compare:highlight-color" artifact).
void Magick::Image::highlightColor(const Color color_)
{
  std::string
    value;

  value=color_;
  artifact("compare:highlight-color",value);
}

// Attach an ICC color profile to the image.
void Magick::Image::iccColorProfile(const Magick::Blob &colorProfile_)
{
  profile("icc",colorProfile_);
}

// Retrieve the ICC color profile, or an empty Blob when none is attached.
Magick::Blob Magick::Image::iccColorProfile(void) const
{
  const StringInfo
    *color_profile;

  color_profile=GetImageProfile(constImage(),"icc");
  if (color_profile == (StringInfo *) NULL)
    return(Blob());
  return(Blob(GetStringInfoDatum(color_profile),GetStringInfoLength(
    color_profile)));
}

// Interlace scheme used when reading/writing the image; mirrored into the
// per-image options so later writes pick it up.
void Magick::Image::interlaceType(const Magick::InterlaceType interlace_)
{
  modifyImage();
  image()->interlace=interlace_;
  options()->interlaceType(interlace_);
}

Magick::InterlaceType Magick::Image::interlaceType(void) const
{
  return(constImage()->interlace);
}

// Pixel interpolation method used when sampling between pixel positions.
void Magick::Image::interpolate(const PixelInterpolateMethod interpolate_)
{
  modifyImage();
  image()->interpolate=interpolate_;
}

Magick::PixelInterpolateMethod Magick::Image::interpolate(void) const
{
  return constImage()->interpolate;
}

// Attach an IPTC profile to the image; a null-data Blob is a no-op.
void Magick::Image::iptcProfile(const Magick::Blob &iptcProfile_)
{
  modifyImage();
  if (iptcProfile_.data() != 0)
    {
      StringInfo
        *iptc_profile;

      // SetStringInfoDatum copies the bytes, so the Blob's buffer is not
      // retained past this call.
      iptc_profile=AcquireStringInfo(iptcProfile_.length());
      SetStringInfoDatum(iptc_profile,(unsigned char *) iptcProfile_.data());
      GetPPException;
      (void) SetImageProfile(image(),"iptc",iptc_profile,exceptionInfo);
      iptc_profile=DestroyStringInfo(iptc_profile);
      ThrowImageException;
    }
}

// Retrieve the IPTC profile, or an empty Blob when none is attached.
Magick::Blob Magick::Image::iptcProfile(void) const
{
  const StringInfo
    *iptc_profile;

  iptc_profile=GetImageProfile(constImage(),"iptc");
  if (iptc_profile == (StringInfo *) NULL)
    return(Blob());
  return(Blob(GetStringInfoDatum(iptc_profile),GetStringInfoLength(
    iptc_profile)));
}

// True when no pixel in the image has partial transparency.
bool Magick::Image::isOpaque(void) const
{
  MagickBooleanType
    result;

  GetPPException;
  result=IsImageOpaque(constImage(),exceptionInfo);
  ThrowImageException;
  return(result != MagickFalse ? true : false);
}

// Force the image valid/invalid.  Invalidating replaces the underlying
// image reference with a fresh (empty) one.
void Magick::Image::isValid(const bool isValid_)
{
  if (!isValid_)
    {
      delete _imgRef;
      _imgRef=new ImageRef;
    }
  else if (!isValid())
    {
      // Construct with single-pixel black image to make
      // image valid. This is an obvious hack.
      size(Geometry(1,1));
      read("xc:black");
    }
}

// An image is valid when it has at least one row and one column.
bool Magick::Image::isValid(void) const
{
  return rows() && columns();
}

// Set (or clear, when empty) the image "Label" property.
void Magick::Image::label(const std::string &label_)
{
  modifyImage();
  GetPPException;
  // Passing NULL first removes any existing value.
  (void) SetImageProperty(image(),"Label",NULL,exceptionInfo);
  if (label_.length() > 0)
    (void) SetImageProperty(image(),"Label",label_.c_str(),exceptionInfo);
  ThrowImageException;
}

std::string Magick::Image::label(void) const
{
  const char
    *value;

  GetPPException;
  value=GetImageProperty(constImage(),"Label",exceptionInfo);
  ThrowImageException;

  if (value)
    return(std::string(value));

  return(std::string());
}

// Color used to mark unchanged regions in compare operations (stored as the
// "compare:lowlight-color" artifact).
void Magick::Image::lowlightColor(const Color color_)
{
  std::string
    value;

  value=color_;
  artifact("compare:lowlight-color",value);
}

// Set the image format (e.g. "GIF"); the name is copied, truncated to the
// fixed-size magick field, and always NUL-terminated.
void Magick::Image::magick(const std::string &magick_)
{
  size_t
    length;

  modifyImage();

  length=sizeof(image()->magick)-1;
  if (magick_.length() < length)
    length=magick_.length();

  if (!magick_.empty())
    magick_.copy(image()->magick,length);
  image()->magick[length]=0;

  options()->magick(magick_);
}

// Image format; falls back to the options' magick when the image carries
// none.
std::string Magick::Image::magick(void) const
{
  if (*(constImage()->magick) != '\0')
    return(std::string(constImage()->magick));

  return(constOptions()->magick());
}

// Color used for masked regions in compare operations (stored as the
// "compare:masklight-color" artifact).
void Magick::Image::masklightColor(const Color color_)
{
  std::string
    value;

  value=color_;
  artifact("compare:masklight-color",value);
}

// Mean error per pixel recorded by the last compare operation.
double Magick::Image::meanErrorPerPixel(void) const
{
  return(constImage()->error.mean_error_per_pixel);
}

// Set the modulus channel depth (bits of resolution actually used).
void Magick::Image::modulusDepth(const size_t depth_)
{
  modifyImage();
  GetPPException;
  SetImageDepth(image(),depth_,exceptionInfo);
  ThrowImageException;
  options()->depth(depth_);
}

size_t Magick::Image::modulusDepth(void) const
{
  size_t
    depth;

  GetPPException;
  depth=GetImageDepth(constImage(),exceptionInfo);
  ThrowImageException;
  return(depth);
}

// Transform the image to black and white on read/write.
void Magick::Image::monochrome(const bool monochromeFlag_)
{
  modifyImage();
  options()->monochrome(monochromeFlag_);
}

bool Magick::Image::monochrome(void) const
{
  return(constOptions()->monochrome());
}
Magick::Geometry Magick::Image::montageGeometry(void) const { if (constImage()->montage) return Magick::Geometry(constImage()->montage); if (!quiet()) throwExceptionExplicit(MagickCore::CorruptImageWarning, "Image does not contain a montage"); return(Magick::Geometry()); } double Magick::Image::normalizedMaxError(void) const { return(constImage()->error.normalized_maximum_error); } double Magick::Image::normalizedMeanError(void) const { return(constImage()->error.normalized_mean_error); } void Magick::Image::orientation(const Magick::OrientationType orientation_) { modifyImage(); image()->orientation=orientation_; } Magick::OrientationType Magick::Image::orientation(void) const { return(constImage()->orientation); } void Magick::Image::page(const Magick::Geometry &pageSize_) { modifyImage(); options()->page(pageSize_); image()->page=pageSize_; } Magick::Geometry Magick::Image::page(void) const { return(Geometry(constImage()->page.width,constImage()->page.height, constImage()->page.x,constImage()->page.y)); } void Magick::Image::quality(const size_t quality_) { modifyImage(); image()->quality=quality_; options()->quality(quality_); } size_t Magick::Image::quality(void) const { return(constImage()->quality); } void Magick::Image::quantizeColors(const size_t colors_) { modifyImage(); options()->quantizeColors(colors_); } size_t Magick::Image::quantizeColors(void) const { return(constOptions()->quantizeColors()); } void Magick::Image::quantizeColorSpace( const Magick::ColorspaceType colorSpace_) { modifyImage(); options()->quantizeColorSpace(colorSpace_); } Magick::ColorspaceType Magick::Image::quantizeColorSpace(void) const { return(constOptions()->quantizeColorSpace()); } void Magick::Image::quantizeDither(const bool ditherFlag_) { modifyImage(); options()->quantizeDither(ditherFlag_); } bool Magick::Image::quantizeDither(void) const { return(constOptions()->quantizeDither()); } void Magick::Image::quantizeDitherMethod(const DitherMethod ditherMethod_) { 
modifyImage(); options()->quantizeDitherMethod(ditherMethod_); } MagickCore::DitherMethod Magick::Image::quantizeDitherMethod(void) const { return(constOptions()->quantizeDitherMethod()); } void Magick::Image::quantizeTreeDepth(const size_t treeDepth_) { modifyImage(); options()->quantizeTreeDepth(treeDepth_); } size_t Magick::Image::quantizeTreeDepth() const { return(constOptions()->quantizeTreeDepth()); } void Magick::Image::quiet(const bool quiet_) { modifyImage(); options()->quiet(quiet_); } bool Magick::Image::quiet(void) const { return(constOptions()->quiet()); } void Magick::Image::renderingIntent( const Magick::RenderingIntent renderingIntent_) { modifyImage(); image()->rendering_intent=renderingIntent_; } Magick::RenderingIntent Magick::Image::renderingIntent(void) const { return(static_cast<Magick::RenderingIntent>(constImage()->rendering_intent)); } void Magick::Image::resolutionUnits( const Magick::ResolutionType resolutionUnits_) { modifyImage(); image()->units=resolutionUnits_; options()->resolutionUnits(resolutionUnits_); } Magick::ResolutionType Magick::Image::resolutionUnits(void) const { return(static_cast<Magick::ResolutionType>(constImage()->units)); } size_t Magick::Image::rows(void) const { return(constImage()->rows); } void Magick::Image::scene(const size_t scene_) { modifyImage(); image()->scene=scene_; } size_t Magick::Image::scene(void) const { return(constImage()->scene); } void Magick::Image::size(const Geometry &geometry_) { modifyImage(); options()->size(geometry_); image()->rows=geometry_.height(); image()->columns=geometry_.width(); } Magick::Geometry Magick::Image::size(void) const { return(Magick::Geometry(constImage()->columns,constImage()->rows)); } void Magick::Image::strokeAntiAlias(const bool flag_) { modifyImage(); options()->strokeAntiAlias(flag_); } bool Magick::Image::strokeAntiAlias(void) const { return(constOptions()->strokeAntiAlias()); } void Magick::Image::strokeColor(const Magick::Color &strokeColor_) { std::string 
value; modifyImage(); options()->strokeColor(strokeColor_); value=strokeColor_; artifact("stroke",value); } Magick::Color Magick::Image::strokeColor(void) const { return(constOptions()->strokeColor()); } void Magick::Image::strokeDashArray(const double *strokeDashArray_) { modifyImage(); options()->strokeDashArray(strokeDashArray_); } const double* Magick::Image::strokeDashArray(void) const { return(constOptions()->strokeDashArray()); } void Magick::Image::strokeDashOffset(const double strokeDashOffset_) { modifyImage(); options()->strokeDashOffset(strokeDashOffset_); } double Magick::Image::strokeDashOffset(void) const { return(constOptions()->strokeDashOffset()); } void Magick::Image::strokeLineCap(const Magick::LineCap lineCap_) { modifyImage(); options()->strokeLineCap(lineCap_); } Magick::LineCap Magick::Image::strokeLineCap(void) const { return(constOptions()->strokeLineCap()); } void Magick::Image::strokeLineJoin(const Magick::LineJoin lineJoin_) { modifyImage(); options()->strokeLineJoin(lineJoin_); } Magick::LineJoin Magick::Image::strokeLineJoin(void) const { return(constOptions()->strokeLineJoin()); } void Magick::Image::strokeMiterLimit(const size_t strokeMiterLimit_) { modifyImage(); options()->strokeMiterLimit(strokeMiterLimit_); } size_t Magick::Image::strokeMiterLimit(void) const { return(constOptions()->strokeMiterLimit()); } void Magick::Image::strokePattern(const Image &strokePattern_) { modifyImage(); if(strokePattern_.isValid()) options()->strokePattern(strokePattern_.constImage()); else options()->strokePattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::strokePattern(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->strokePattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void 
Magick::Image::strokeWidth(const double strokeWidth_) { char value[MagickPathExtent]; modifyImage(); options()->strokeWidth(strokeWidth_); FormatLocaleString(value,MagickPathExtent,"%.20g",strokeWidth_); (void) SetImageArtifact(image(),"strokewidth",value); } double Magick::Image::strokeWidth(void) const { return(constOptions()->strokeWidth()); } void Magick::Image::subImage(const size_t subImage_) { modifyImage(); options()->subImage(subImage_); } size_t Magick::Image::subImage(void) const { return(constOptions()->subImage()); } void Magick::Image::subRange(const size_t subRange_) { modifyImage(); options()->subRange(subRange_); } size_t Magick::Image::subRange(void) const { return(constOptions()->subRange()); } void Magick::Image::textAntiAlias(const bool flag_) { modifyImage(); options()->textAntiAlias(flag_); } bool Magick::Image::textAntiAlias(void) const { return(constOptions()->textAntiAlias()); } void Magick::Image::textDirection(DirectionType direction_) { modifyImage(); options()->textDirection(direction_); } Magick::DirectionType Magick::Image::textDirection(void) const { return(constOptions()->textDirection()); } void Magick::Image::textEncoding(const std::string &encoding_) { modifyImage(); options()->textEncoding(encoding_); } std::string Magick::Image::textEncoding(void) const { return(constOptions()->textEncoding()); } void Magick::Image::textGravity(GravityType gravity_) { modifyImage(); options()->textGravity(gravity_); } Magick::GravityType Magick::Image::textGravity(void) const { return(constOptions()->textGravity()); } void Magick::Image::textInterlineSpacing(double spacing_) { modifyImage(); options()->textInterlineSpacing(spacing_); } double Magick::Image::textInterlineSpacing(void) const { return(constOptions()->textInterlineSpacing()); } void Magick::Image::textInterwordSpacing(double spacing_) { modifyImage(); options()->textInterwordSpacing(spacing_); } double Magick::Image::textInterwordSpacing(void) const { 
return(constOptions()->textInterwordSpacing()); } void Magick::Image::textKerning(double kerning_) { modifyImage(); options()->textKerning(kerning_); } double Magick::Image::textKerning(void) const { return(constOptions()->textKerning()); } void Magick::Image::textUnderColor(const Color &underColor_) { modifyImage(); options()->textUnderColor(underColor_); } Magick::Color Magick::Image::textUnderColor(void) const { return(constOptions()->textUnderColor()); } size_t Magick::Image::totalColors(void) const { size_t colors; GetPPException; colors=GetNumberColors(constImage(),(FILE *) NULL,exceptionInfo); ThrowImageException; return colors; } void Magick::Image::transformRotation(const double angle_) { modifyImage(); options()->transformRotation(angle_); } void Magick::Image::transformSkewX(const double skewx_) { modifyImage(); options()->transformSkewX(skewx_); } void Magick::Image::transformSkewY(const double skewy_) { modifyImage(); options()->transformSkewY(skewy_); } Magick::ImageType Magick::Image::type(void) const { if (constOptions()->type() != UndefinedType) return(constOptions()->type()); return(GetImageType(constImage())); } void Magick::Image::type(const Magick::ImageType type_) { modifyImage(); options()->type(type_); GetPPException; SetImageType(image(),type_,exceptionInfo); ThrowImageException; } void Magick::Image::verbose(const bool verboseFlag_) { modifyImage(); options()->verbose(verboseFlag_); } bool Magick::Image::verbose(void) const { return(constOptions()->verbose()); } void Magick::Image::virtualPixelMethod( const VirtualPixelMethod virtualPixelMethod_) { modifyImage(); GetPPException; SetImageVirtualPixelMethod(image(),virtualPixelMethod_,exceptionInfo); ThrowImageException; } Magick::VirtualPixelMethod Magick::Image::virtualPixelMethod(void) const { return(GetImageVirtualPixelMethod(constImage())); } void Magick::Image::x11Display(const std::string &display_) { modifyImage(); options()->x11Display(display_); } std::string 
Magick::Image::x11Display(void) const { return(constOptions()->x11Display()); } double Magick::Image::xResolution(void) const { return(constImage()->resolution.x); } double Magick::Image::yResolution(void) const { return(constImage()->resolution.y); } void Magick::Image::adaptiveBlur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveBlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveResize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=AdaptiveResizeImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpenChannel(const ChannelType channel_, const double radius_,const double sigma_ ) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveThreshold(const size_t width_,const size_t height_, const double bias_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveThresholdImage(constImage(),width_,height_,bias_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoise(const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; newImage=AddNoiseImage(constImage(),noiseType_,1.0,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoiseChannel(const ChannelType 
channel_, const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=AddNoiseImage(constImage(),noiseType_,1.0,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::affineTransform(const DrawableAffine &affine_) { AffineMatrix _affine; MagickCore::Image *newImage; _affine.sx=affine_.sx(); _affine.sy=affine_.sy(); _affine.rx=affine_.rx(); _affine.ry=affine_.ry(); _affine.tx=affine_.tx(); _affine.ty=affine_.ty(); GetPPException; newImage=AffineTransformImage(constImage(),&_affine,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::alpha(const unsigned int alpha_) { modifyImage(); GetPPException; SetImageAlpha(image(),alpha_,exceptionInfo); ThrowImageException; } void Magick::Image::alphaChannel(AlphaChannelOption alphaOption_) { modifyImage(); GetPPException; SetImageAlphaChannel(image(),alphaOption_,exceptionInfo); ThrowImageException; } void Magick::Image::annotate(const std::string &text_, const Geometry &location_) { annotate(text_,location_,NorthWestGravity,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_) { annotate(text_,boundingArea_,gravity_,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_, const double degrees_) { AffineMatrix oaffine; char boundingArea[MagickPathExtent]; DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->geometry=DestroyString(drawInfo->geometry); if (boundingArea_.isValid()) { if (boundingArea_.width() == 0 || boundingArea_.height() == 0) { FormatLocaleString(boundingArea,MagickPathExtent,"%+.20g%+.20g", (double) boundingArea_.xOff(),(double) boundingArea_.yOff()); } else { (void) CopyMagickString(boundingArea, 
std::string(boundingArea_).c_str(), MagickPathExtent); } drawInfo->geometry=boundingArea; } drawInfo->gravity=gravity_; oaffine=drawInfo->affine; if (degrees_ != 0.0) { AffineMatrix affine, current; affine.sx=1.0; affine.rx=0.0; affine.ry=0.0; affine.sy=1.0; affine.tx=0.0; affine.ty=0.0; current=drawInfo->affine; affine.sx=cos(DegreesToRadians(fmod(degrees_,360.0))); affine.rx=sin(DegreesToRadians(fmod(degrees_,360.0))); affine.ry=(-sin(DegreesToRadians(fmod(degrees_,360.0)))); affine.sy=cos(DegreesToRadians(fmod(degrees_,360.0))); drawInfo->affine.sx=current.sx*affine.sx+current.ry*affine.rx; drawInfo->affine.rx=current.rx*affine.sx+current.sy*affine.rx; drawInfo->affine.ry=current.sx*affine.ry+current.ry*affine.sy; drawInfo->affine.sy=current.rx*affine.ry+current.sy*affine.sy; drawInfo->affine.tx=current.sx*affine.tx+current.ry*affine.ty +current.tx; } GetPPException; AnnotateImage(image(),drawInfo,exceptionInfo); // Restore original values drawInfo->affine=oaffine; drawInfo->text=(char *) NULL; drawInfo->geometry=(char *) NULL; ThrowImageException; } void Magick::Image::annotate(const std::string &text_, const GravityType gravity_) { DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->gravity=gravity_; GetPPException; AnnotateImage(image(),drawInfo,exceptionInfo); drawInfo->gravity=NorthWestGravity; drawInfo->text=(char *) NULL; ThrowImageException; } void Magick::Image::artifact(const std::string &name_,const std::string &value_) { modifyImage(); (void) SetImageArtifact(image(),name_.c_str(),value_.c_str()); } std::string Magick::Image::artifact(const std::string &name_) const { const char *value; value=GetImageArtifact(constImage(),name_.c_str()); if (value) return(std::string(value)); return(std::string()); } void Magick::Image::attribute(const std::string name_,const char *value_) { modifyImage(); GetPPException; 
SetImageProperty(image(),name_.c_str(),value_,exceptionInfo); ThrowImageException; } void Magick::Image::attribute(const std::string name_,const std::string value_) { modifyImage(); GetPPException; SetImageProperty(image(),name_.c_str(),value_.c_str(),exceptionInfo); ThrowImageException; } std::string Magick::Image::attribute(const std::string name_) const { const char *value; GetPPException; value=GetImageProperty(constImage(),name_.c_str(),exceptionInfo); ThrowImageException; if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::autoGamma(void) { modifyImage(); GetPPException; (void) SyncImageSettings(imageInfo(),image(),exceptionInfo); (void) AutoGammaImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::autoGammaChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) SyncImageSettings(imageInfo(),image(),exceptionInfo); (void) AutoGammaImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::autoLevel(void) { modifyImage(); GetPPException; (void) AutoLevelImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::autoLevelChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) AutoLevelImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::autoOrient(void) { MagickCore::Image *newImage; if (image()->orientation == UndefinedOrientation || image()->orientation == TopLeftOrientation) return; GetPPException; newImage=AutoOrientImage(constImage(),image()->orientation,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::autoThreshold(const AutoThresholdMethod method_) { modifyImage(); GetPPException; AutoThresholdImage(image(),method_, exceptionInfo); ThrowImageException; } void Magick::Image::blackThreshold(const std::string &threshold_) { modifyImage(); 
GetPPException; BlackThresholdImage(image(),threshold_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::blackThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); BlackThresholdImage(image(),threshold_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::blueShift(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=BlueShiftImage(constImage(),factor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blurChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::border(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo borderInfo=geometry_; GetPPException; newImage=BorderImage(constImage(),&borderInfo,image()->compose, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::brightnessContrast(const double brightness_, const double contrast_) { modifyImage(); GetPPException; BrightnessContrastImage(image(),brightness_,contrast_,exceptionInfo); ThrowImageException; } void Magick::Image::brightnessContrastChannel(const ChannelType channel_, const double brightness_,const double contrast_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); BrightnessContrastImage(image(),brightness_,contrast_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::cannyEdge(const double radius_,const double sigma_, const double 
lowerPercent_,const double upperPercent_) { MagickCore::Image *newImage; modifyImage(); GetPPException; newImage=CannyEdgeImage(constImage(),radius_,sigma_,lowerPercent_, upperPercent_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::cdl(const std::string &cdl_) { modifyImage(); GetPPException; (void) ColorDecisionListImage(image(),cdl_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::channel(const ChannelType channel_) { MagickCore::Image *newImage; GetPPException; newImage=SeparateImage(image(),channel_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::charcoal(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=CharcoalImage(image(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::charcoalChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=CharcoalImage(image(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::chop(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo chopInfo=geometry_; GetPPException; newImage=ChopImage(image(),&chopInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::chromaBluePrimary(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.blue_primary.x=x_; image()->chromaticity.blue_primary.y=y_; image()->chromaticity.blue_primary.z=z_; } void Magick::Image::chromaBluePrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.blue_primary.x; *y_=constImage()->chromaticity.blue_primary.y; *z_=constImage()->chromaticity.blue_primary.z; } void Magick::Image::chromaGreenPrimary(const double x_,const double y_, const double z_) { modifyImage(); 
image()->chromaticity.green_primary.x=x_; image()->chromaticity.green_primary.y=y_; image()->chromaticity.green_primary.z=z_; } void Magick::Image::chromaGreenPrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.green_primary.x; *y_=constImage()->chromaticity.green_primary.y; *z_=constImage()->chromaticity.green_primary.z; } void Magick::Image::chromaRedPrimary(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.red_primary.x=x_; image()->chromaticity.red_primary.y=y_; image()->chromaticity.red_primary.z=z_; } void Magick::Image::chromaRedPrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.red_primary.x; *y_=constImage()->chromaticity.red_primary.y; *z_=constImage()->chromaticity.red_primary.z; } void Magick::Image::chromaWhitePoint(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.white_point.x=x_; image()->chromaticity.white_point.y=y_; image()->chromaticity.white_point.z=z_; } void Magick::Image::chromaWhitePoint(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.white_point.x; *y_=constImage()->chromaticity.white_point.y; *z_=constImage()->chromaticity.white_point.z; } void Magick::Image::clamp(void) { modifyImage(); GetPPException; ClampImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::clampChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); ClampImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::clip(void) { modifyImage(); GetPPException; ClipImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::clipPath(const std::string pathname_,const bool inside_) { modifyImage(); GetPPException; ClipImagePath(image(),pathname_.c_str(),(MagickBooleanType) inside_, exceptionInfo); ThrowImageException; } void Magick::Image::clut(const Image &clutImage_, const PixelInterpolateMethod 
method) { modifyImage(); GetPPException; ClutImage(image(),clutImage_.constImage(),method,exceptionInfo); ThrowImageException; } void Magick::Image::clutChannel(const ChannelType channel_, const Image &clutImage_,const PixelInterpolateMethod method) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); ClutImage(image(),clutImage_.constImage(),method,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::colorize(const unsigned int alpha_,const Color &penColor_) { colorize(alpha_,alpha_,alpha_,penColor_); } void Magick::Image::colorize(const unsigned int alphaRed_, const unsigned int alphaGreen_,const unsigned int alphaBlue_, const Color &penColor_) { char blend[MagickPathExtent]; MagickCore::Image *newImage; PixelInfo target; if (!penColor_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Pen color argument is invalid"); FormatLocaleString(blend,MagickPathExtent,"%u/%u/%u",alphaRed_,alphaGreen_, alphaBlue_); target=static_cast<PixelInfo>(penColor_); GetPPException; newImage=ColorizeImage(image(),blend,&target,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::colorMap(const size_t index_,const Color &color_) { MagickCore::Image *imageptr; imageptr=image(); if (index_ > (MaxColormapSize-1)) throwExceptionExplicit(MagickCore::OptionError, "Colormap index must be less than MaxColormapSize"); if (!color_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Color argument is invalid"); modifyImage(); // Ensure that colormap size is large enough if (colorMapSize() < (index_+1)) colorMapSize(index_+1); // Set color at index in colormap (imageptr->colormap)[index_]=color_; } Magick::Color Magick::Image::colorMap(const size_t index_) const { if (!constImage()->colormap) { throwExceptionExplicit(MagickCore::OptionError, "Image does not contain a colormap"); return(Color()); } if (index_ > constImage()->colors-1) throwExceptionExplicit(MagickCore::OptionError,"Index out of range"); 
return(Magick::Color((constImage()->colormap)[index_])); } void Magick::Image::colorMatrix(const size_t order_, const double *color_matrix_) { KernelInfo *kernel_info; GetPPException; kernel_info=AcquireKernelInfo((const char *) NULL,exceptionInfo); if (kernel_info != (KernelInfo *) NULL) { kernel_info->width=order_; kernel_info->height=order_; kernel_info->values=(MagickRealType *) AcquireAlignedMemory(order_, order_*sizeof(*kernel_info->values)); if (kernel_info->values != (MagickRealType *) NULL) { MagickCore::Image *newImage; for (ssize_t i=0; i < (ssize_t) (order_*order_); i++) kernel_info->values[i]=color_matrix_[i]; newImage=ColorMatrixImage(image(),kernel_info,exceptionInfo); replaceImage(newImage); } kernel_info=DestroyKernelInfo(kernel_info); } ThrowImageException; } bool Magick::Image::compare(const Image &reference_) const { bool status; Image ref=reference_; GetPPException; status=static_cast<bool>(IsImagesEqual(constImage(),ref.constImage(), exceptionInfo)); ThrowImageException; return(status); } double Magick::Image::compare(const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageDistortion(image(),reference_.constImage(),metric_,&distortion, exceptionInfo); ThrowImageException; return(distortion); } double Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetAndSetPPChannelMask(channel_); GetImageDistortion(image(),reference_.constImage(),metric_,&distortion, exceptionInfo); RestorePPChannelMask; ThrowImageException; return(distortion); } Magick::Image Magick::Image::compare(const Image &reference_, const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; newImage=CompareImages(image(),reference_.constImage(),metric_,distortion, exceptionInfo); ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } 
Magick::Image Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=CompareImages(image(),reference_.constImage(),metric_,distortion, exceptionInfo); RestorePPChannelMask; ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::composite(const Image &compositeImage_, const Geometry &offset_,const CompositeOperator compose_) { size_t height=rows(), width=columns(); ssize_t x=offset_.xOff(), y=offset_.yOff(); ParseMetaGeometry(static_cast<std::string>(offset_).c_str(),&x,&y,&width, &height); modifyImage(); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, x,y,exceptionInfo); ThrowImageException; } void Magick::Image::composite(const Image &compositeImage_, const GravityType gravity_,const CompositeOperator compose_) { RectangleInfo geometry; modifyImage(); SetGeometry(compositeImage_.constImage(),&geometry); GravityAdjustGeometry(columns(),rows(),gravity_,&geometry); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, geometry.x,geometry.y,exceptionInfo); ThrowImageException; } void Magick::Image::composite(const Image &compositeImage_, const ssize_t xOffset_,const ssize_t yOffset_, const CompositeOperator compose_) { // Image supplied as compositeImage is composited with current image and // results in updating current image. 
modifyImage(); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, xOffset_,yOffset_,exceptionInfo); ThrowImageException; } void Magick::Image::connectedComponents(const size_t connectivity_) { MagickCore::Image *newImage; GetPPException; newImage=ConnectedComponentsImage(constImage(),connectivity_, (CCObjectInfo **) NULL,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::contrast(const bool sharpen_) { modifyImage(); GetPPException; ContrastImage(image(),(MagickBooleanType) sharpen_,exceptionInfo); ThrowImageException; } void Magick::Image::contrastStretch(const double blackPoint_, const double whitePoint_) { modifyImage(); GetPPException; ContrastStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo); ThrowImageException; } void Magick::Image::contrastStretchChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); ContrastStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::convolve(const size_t order_,const double *kernel_) { KernelInfo *kernel_info; GetPPException; kernel_info=AcquireKernelInfo((const char *) NULL,exceptionInfo); kernel_info->width=order_; kernel_info->height=order_; kernel_info->x=(ssize_t) (order_-1)/2; kernel_info->y=(ssize_t) (order_-1)/2; kernel_info->values=(MagickRealType *) AcquireAlignedMemory(order_, order_*sizeof(*kernel_info->values)); if (kernel_info->values != (MagickRealType *) NULL) { MagickCore::Image *newImage; for (ssize_t i=0; i < (ssize_t) (order_*order_); i++) kernel_info->values[i]=kernel_[i]; newImage=ConvolveImage(image(),kernel_info,exceptionInfo); replaceImage(newImage); } kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException; } void Magick::Image::copyPixels(const Image &source_,const Geometry &geometry_, const Offset &offset_) { const OffsetInfo offset=offset_; const 
RectangleInfo
    geometry=geometry_;

  GetPPException;
  (void) CopyImagePixels(image(),source_.constImage(),&geometry,&offset,
    exceptionInfo);
  ThrowImageException;
}

// Crop the image to the region described by geometry_.
void Magick::Image::crop(const Geometry &geometry_)
{
  MagickCore::Image
    *newImage;

  RectangleInfo
    cropInfo=geometry_;

  GetPPException;
  newImage=CropImage(constImage(),&cropInfo,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Displace colormap entries by amount_ (wrap-around).
void Magick::Image::cycleColormap(const ssize_t amount_)
{
  modifyImage();
  GetPPException;
  CycleColormapImage(image(),amount_,exceptionInfo);
  ThrowImageException;
}

// Decrypt pixels previously enciphered with the same passphrase.
void Magick::Image::decipher(const std::string &passphrase_)
{
  modifyImage();
  GetPPException;
  DecipherImage(image(),passphrase_.c_str(),exceptionInfo);
  ThrowImageException;
}

// Set (flag_=true) or remove the "magick:key" image option.
void Magick::Image::defineSet(const std::string &magick_,
  const std::string &key_,bool flag_)
{
  std::string
    definition;

  modifyImage();
  definition=magick_ + ":" + key_;
  if (flag_)
    (void) SetImageOption(imageInfo(),definition.c_str(),"");
  else
    DeleteImageOption(imageInfo(),definition.c_str());
}

// Returns true when the "magick:key" option is currently set.
bool Magick::Image::defineSet(const std::string &magick_,
  const std::string &key_ ) const
{
  const char
    *option;

  std::string
    key;

  key=magick_ + ":" + key_;
  option=GetImageOption(constImageInfo(),key.c_str());
  if (option)
    return(true);
  return(false);
}

// Assign a value to the "magick:key" image option.
void Magick::Image::defineValue(const std::string &magick_,
  const std::string &key_,const std::string &value_)
{
  std::string
    format,
    option;

  modifyImage();
  format=magick_ + ":" + key_;
  option=value_;
  (void) SetImageOption(imageInfo(),format.c_str(),option.c_str());
}

// Returns the value of the "magick:key" option (empty string when unset).
std::string Magick::Image::defineValue(const std::string &magick_,
  const std::string &key_) const
{
  const char
    *option;

  std::string
    definition;

  definition=magick_ + ":" + key_;
  option=GetImageOption(constImageInfo(),definition.c_str());
  if (option)
    return(std::string(option));
  return(std::string());
}

// Straighten a scanned image; threshold_ controls background separation.
void Magick::Image::deskew(const double threshold_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=DeskewImage(constImage(),threshold_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Reduce speckle noise while preserving edges.
void Magick::Image::despeckle(void)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=DespeckleImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Display the image on the default X server.
void Magick::Image::display(void)
{
  GetPPException;
  DisplayImages(imageInfo(),image(),exceptionInfo);
  ThrowImageException;
}

// Distort the image with the given method and control-point arguments;
// bestfit_ lets the result canvas grow to fit the distortion.
void Magick::Image::distort(const DistortMethod method_,
  const size_t numberArguments_,const double *arguments_,const bool bestfit_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=DistortImage(constImage(), method_,numberArguments_,arguments_,
    bestfit_ == true ? MagickTrue : MagickFalse,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Render a single drawable object onto the image.
void Magick::Image::draw(const Magick::Drawable &drawable_)
{
  DrawingWand
    *wand;

  modifyImage();
  wand=AcquireDrawingWand(options()->drawInfo(),image());
  if(wand)
    {
      drawable_.operator()(wand);
      DrawRender(wand);
      ClonePPDrawException(wand);
      wand=DestroyDrawingWand(wand);
      ThrowPPDrawException(quiet());
    }
}

// Render a list of drawable objects; drawing stops at the first error and
// rendering is skipped if any drawable raised an exception.
void Magick::Image::draw(const std::vector<Magick::Drawable> &drawable_)
{
  DrawingWand
    *wand;

  modifyImage();
  wand= AcquireDrawingWand(options()->drawInfo(),image());
  if(wand)
    {
      for (std::vector<Magick::Drawable>::const_iterator p = drawable_.begin();
           p != drawable_.end(); p++ )
        {
          p->operator()(wand);
          if (DrawGetExceptionType(wand) != MagickCore::UndefinedException)
            break;
        }
      if (DrawGetExceptionType(wand) == MagickCore::UndefinedException)
        DrawRender(wand);
      ClonePPDrawException(wand);
      wand=DestroyDrawingWand(wand);
      ThrowPPDrawException(quiet());
    }
}

// Edge-detect the image with the given radius.
void Magick::Image::edge(const double radius_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=EdgeImage(constImage(),radius_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Emboss (highlight edges with a 3-D effect).
void Magick::Image::emboss(const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=EmbossImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Encrypt pixels with a passphrase (reversed by decipher()).
void Magick::Image::encipher(const std::string &passphrase_)
{
  modifyImage();
  GetPPException;
  EncipherImage(image(),passphrase_.c_str(),exceptionInfo);
  ThrowImageException;
}

// Apply a digital filter that improves a noisy image.
void Magick::Image::enhance(void)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=EnhanceImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Histogram equalization.
void Magick::Image::equalize(void)
{
  modifyImage();
  GetPPException;
  EqualizeImage(image(),exceptionInfo);
  ThrowImageException;
}

// Reset all pixels to the background color.
void Magick::Image::erase(void)
{
  modifyImage();
  GetPPException;
  (void) SetImageBackgroundColor(image(),exceptionInfo);
  ThrowImageException;
}

// Apply an arithmetic/bitwise operator to the selected channel(s).
void Magick::Image::evaluate(const ChannelType channel_,
  const MagickEvaluateOperator operator_,double rvalue_)
{
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  EvaluateImage(image(),operator_,rvalue_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Apply a parameterized function to the selected channel(s).
void Magick::Image::evaluate(const ChannelType channel_,
  const MagickFunction function_,const size_t number_parameters_,
  const double *parameters_)
{
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  FunctionImage(image(),function_,number_parameters_,parameters_,
    exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Apply an operator to a rectangular region of the selected channel(s):
// the region is cropped out, evaluated, then composited back in place.
void Magick::Image::evaluate(const ChannelType channel_,const ssize_t x_,
  const ssize_t y_,const size_t columns_,const size_t rows_,
  const MagickEvaluateOperator operator_,const double rvalue_)
{
  RectangleInfo
    geometry;

  MagickCore::Image
    *cropImage;

  geometry.width = columns_;
  geometry.height = rows_;
  geometry.x = x_;
  geometry.y = y_;
  GetPPException;
  cropImage=CropImage(image(),&geometry,exceptionInfo);
  GetAndSetPPChannelMask(channel_);
  EvaluateImage(cropImage,operator_,rvalue_,exceptionInfo);
  RestorePPChannelMask;
  (void) CompositeImage(image(),cropImage,
    image()->alpha_trait == BlendPixelTrait ?
    OverCompositeOp : CopyCompositeOp,MagickFalse,
    geometry.x,geometry.y,exceptionInfo );
  cropImage=DestroyImageList(cropImage);
  ThrowImageException;
}

// Extend (or reduce) the image canvas to the given geometry.
void Magick::Image::extent(const Geometry &geometry_ )
{
  MagickCore::Image
    *newImage;

  RectangleInfo
    extentInfo=geometry_;

  modifyImage();
  extentInfo.x=geometry_.xOff();
  extentInfo.y=geometry_.yOff();
  GetPPException;
  newImage=ExtentImage(image(),&extentInfo,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Extent with an explicit background color for the new canvas area.
void Magick::Image::extent(const Geometry &geometry_,
  const Color &backgroundColor_)
{
  backgroundColor(backgroundColor_);
  extent(geometry_);
}

void Magick::Image::extent(const Geometry &geometry_,
  const Color &backgroundColor_,const GravityType gravity_)
{
  backgroundColor(backgroundColor_);
  extent(geometry_,gravity_);
}

// Extent with gravity-based placement of the existing image.
void Magick::Image::extent(const Geometry &geometry_,
  const GravityType gravity_)
{
  RectangleInfo
    geometry;

  SetGeometry(image(),&geometry);
  geometry.width=geometry_.width();
  geometry.height=geometry_.height();
  GravityAdjustGeometry(image()->columns,image()->rows,gravity_,&geometry);
  extent(geometry);
}

// Reflect the image vertically (top/bottom).
void Magick::Image::flip(void)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=FlipImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Flood-fill the alpha channel starting at (x_,y_), matching the pixel at
// that location.
void Magick::Image::floodFillAlpha(const ssize_t x_,const ssize_t y_,
  const unsigned int alpha_,const bool invert_)
{
  PixelInfo
    target;

  modifyImage();
  target=static_cast<PixelInfo>(pixelColor(x_,y_));
  target.alpha=alpha_;
  GetPPException;
  GetAndSetPPChannelMask(AlphaChannel);
  FloodfillPaintImage(image(),options()->drawInfo(),&target,x_,y_,
    (MagickBooleanType)invert_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Flood-fill the alpha channel using an explicit target color.
void Magick::Image::floodFillAlpha(const ssize_t x_,const ssize_t y_,
  const unsigned int alpha_,const Color &target_,const bool invert_)
{
  PixelInfo
    target;

  modifyImage();
  target=static_cast<PixelInfo>(target_);
  target.alpha=alpha_;
  GetPPException;
  GetAndSetPPChannelMask(AlphaChannel);
FloodfillPaintImage(image(),options()->drawInfo(),&target,x_,y_,
    (MagickBooleanType)invert_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Flood-fill with a color starting at the given point.
void Magick::Image::floodFillColor(const Geometry &point_,
  const Magick::Color &fillColor_,const bool invert_)
{
  floodFillColor(point_.xOff(),point_.yOff(),fillColor_,invert_);
}

void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_,
  const Magick::Color &fillColor_,const bool invert_)
{
  PixelInfo
    pixel;

  modifyImage();
  // Match pixels of the same color as the starting pixel.
  pixel=static_cast<PixelInfo>(pixelColor(x_,y_));
  floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_);
}

// Flood-fill with a color, stopping at the border color.
void Magick::Image::floodFillColor(const Geometry &point_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_,
  const bool invert_)
{
  floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,invert_);
}

void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_,
  const bool invert_)
{
  PixelInfo
    pixel;

  modifyImage();
  pixel=static_cast<PixelInfo>(borderColor_);
  floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_);
}

// Flood-fill with a texture image starting at the given point.
void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_,const bool invert_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,invert_);
}

void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_,
  const Magick::Image &texture_,const bool invert_)
{
  PixelInfo
    pixel;

  modifyImage();
  pixel=static_cast<PixelInfo>(pixelColor(x_,y_));
  floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_);
}

// Flood-fill with a texture, stopping at the border color.
void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_,const Magick::Color &borderColor_,
  const bool invert_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,invert_);
}

void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_,
  const Magick::Image &texture_,const Magick::Color &borderColor_,
  const bool invert_)
{
  PixelInfo
    pixel;

  modifyImage();
  pixel=static_cast<PixelInfo>(borderColor_);
  floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_);
}

// Reflect the image horizontally (left/right).
void Magick::Image::flop(void)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=FlopImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Obtain single-line font metrics for text_ using the current draw
// options.
void Magick::Image::fontTypeMetrics(const std::string &text_,
  TypeMetric *metrics)
{
  DrawInfo
    *drawInfo;

  drawInfo=options()->drawInfo();
  // Borrow the caller's string for the duration of the call; the pointer
  // is cleared afterwards so DrawInfo never owns (or frees) it.
  drawInfo->text=const_cast<char *>(text_.c_str());
  GetPPException;
  GetTypeMetrics(image(),drawInfo,&(metrics->_typeMetric),exceptionInfo);
  drawInfo->text=0;
  ThrowImageException;
}

// Obtain multi-line font metrics for text_.
void Magick::Image::fontTypeMetricsMultiline(const std::string &text_,
  TypeMetric *metrics)
{
  DrawInfo
    *drawInfo;

  drawInfo=options()->drawInfo();
  drawInfo->text=const_cast<char *>(text_.c_str());
  GetPPException;
  GetMultilineTypeMetrics(image(),drawInfo,&(metrics->_typeMetric),
    exceptionInfo);
  drawInfo->text=0;
  ThrowImageException;
}

// Add a decorative frame described by a geometry: width/height give the
// frame thickness, x/y offsets give the outer/inner bevel widths.
void Magick::Image::frame(const Geometry &geometry_)
{
  FrameInfo
    info;

  MagickCore::Image
    *newImage;

  info.x=static_cast<ssize_t>(geometry_.width());
  info.y=static_cast<ssize_t>(geometry_.height());
  info.width=columns() + (static_cast<size_t>(info.x) << 1);
  info.height=rows() + (static_cast<size_t>(info.y) << 1);
  info.outer_bevel=geometry_.xOff();
  info.inner_bevel=geometry_.yOff();
  GetPPException;
  newImage=FrameImage(constImage(),&info,image()->compose,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::frame(const size_t width_,const size_t height_,
  const ssize_t innerBevel_,const ssize_t outerBevel_)
{
  FrameInfo
    info;

  MagickCore::Image
    *newImage;

  info.x=static_cast<ssize_t>(width_);
  info.y=static_cast<ssize_t>(height_);
  info.width=columns() + (static_cast<size_t>(info.x) << 1);
  info.height=rows() + (static_cast<size_t>(info.y) << 1);
  info.outer_bevel=static_cast<ssize_t>(outerBevel_);
  info.inner_bevel=static_cast<ssize_t>(innerBevel_);
  GetPPException;
  newImage=FrameImage(constImage(),&info,image()->compose,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Apply an Fx expression to all channels.
void Magick::Image::fx(const std::string expression_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=FxImage(constImage(),expression_.c_str(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Apply an Fx expression to the selected channel(s).
void Magick::Image::fx(const std::string expression_,
  const Magick::ChannelType channel_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  GetAndSetPPChannelMask(channel_);
  newImage=FxImage(constImage(),expression_.c_str(),exceptionInfo);
  RestorePPChannelMask;
  replaceImage(newImage);
  ThrowImageException;
}

// Gamma-correct all channels.
void Magick::Image::gamma(const double gamma_)
{
  modifyImage();
  GetPPException;
  GammaImage(image(),gamma_,exceptionInfo);
  ThrowImageException;
}

// Gamma-correct the red, green and blue channels independently.
void Magick::Image::gamma(const double gammaRed_,const double gammaGreen_,
  const double gammaBlue_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(RedChannel);
  (void) GammaImage(image(),gammaRed_,exceptionInfo);
  SetPPChannelMask(GreenChannel);
  (void) GammaImage(image(),gammaGreen_,exceptionInfo);
  SetPPChannelMask(BlueChannel);
  (void) GammaImage(image(),gammaBlue_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Gaussian blur the whole image.
void Magick::Image::gaussianBlur(const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=GaussianBlurImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::gaussianBlurChannel(const ChannelType channel_,
  const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  GetAndSetPPChannelMask(channel_);
  newImage=GaussianBlurImage(constImage(),radius_,sigma_,exceptionInfo);
  RestorePPChannelMask;
  replaceImage(newImage);
  ThrowImageException;
}

// Read-only access to a rectangular pixel region (virtual pixel view).
const Magick::Quantum *Magick::Image::getConstPixels(const ssize_t x_,
  const ssize_t y_,const size_t columns_,const size_t rows_) const
{
  const Quantum
    *p;

  GetPPException;
  p=GetVirtualPixels(constImage(),x_, y_,columns_, rows_,exceptionInfo);
  ThrowImageException;
  return(p);
}

// Read-only access to the image's metacontent; throws when unavailable.
const void *Magick::Image::getConstMetacontent(void) const
{
  const void
    *result;

  result=GetVirtualMetacontent(constImage());

  if(!result)
    throwExceptionExplicit(MagickCore::OptionError,
      "Unable to retrieve meta content.");

  return(result);
}

// Writable access to the image's metacontent; throws when unavailable.
void *Magick::Image::getMetacontent(void )
{
  void
    *result;

  result=GetAuthenticMetacontent(image());

  if(!result)
    throwExceptionExplicit(MagickCore::OptionError,
      "Unable to retrieve meta content.");

  return(result);
}

// Writable access to a rectangular pixel region (authentic pixels); call
// Pixels::sync() (or syncPixels) after modification.
Magick::Quantum *Magick::Image::getPixels(const ssize_t x_,const ssize_t y_,
  const size_t columns_,const size_t rows_)
{
  Quantum
    *result;

  modifyImage();
  GetPPException;
  result=GetAuthenticPixels(image(),x_, y_,columns_,rows_,exceptionInfo);
  ThrowImageException;
  return(result);
}

// Convert the image to grayscale using the given intensity method.
void Magick::Image::grayscale(const PixelIntensityMethod method_)
{
  modifyImage();
  GetPPException;
  (void) GrayscaleImage(image(),method_,exceptionInfo);
  ThrowImageException;
}

// Apply a Hald color lookup table image.
void Magick::Image::haldClut(const Image &clutImage_)
{
  modifyImage();
  GetPPException;
  (void) HaldClutImage(image(),clutImage_.constImage(),exceptionInfo);
  ThrowImageException;
}

// Identify straight lines via the Hough transform.
void Magick::Image::houghLine(const size_t width_,const size_t height_,
  const size_t threshold_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=HoughLineImage(constImage(),width_,height_,threshold_,
    exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Determine the image type by inspecting the pixels.
Magick::ImageType Magick::Image::identifyType(void) const
{
  ImageType
    image_type;

  GetPPException;
  image_type=IdentifyImageType(constImage(),exceptionInfo);
  ThrowImageException;
  return(image_type);
}

// Implode pixels toward the image center by factor_.
void Magick::Image::implode(const double factor_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=ImplodeImage(constImage(),factor_,image()->interpolate,
    exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Inverse discrete Fourier transform; defaults to magnitude coefficients.
void Magick::Image::inverseFourierTransform(const Image &phase_)
{
  inverseFourierTransform(phase_,true);
}

void
Magick::Image::inverseFourierTransform(const Image &phase_,
  const bool magnitude_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=InverseFourierTransformImage(constImage(),phase_.constImage(),
    magnitude_ == true ? MagickTrue : MagickFalse,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Edge-preserving noise reduction (Kuwahara filter).
void Magick::Image::kuwahara(const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::kuwaharaChannel(const ChannelType channel_,
  const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  GetAndSetPPChannelMask(channel_);
  newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  RestorePPChannelMask;
  ThrowImageException;
}

// Adjust black/white points with mid-tone gamma correction.
void Magick::Image::level(const double blackPoint_,const double whitePoint_,
  const double gamma_)
{
  modifyImage();
  GetPPException;
  (void) LevelImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::levelChannel(const ChannelType channel_,
  const double blackPoint_,const double whitePoint_,const double gamma_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  (void) LevelImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Map the given black/white colors to black/white (or invert the mapping).
void Magick::Image::levelColors(const Color &blackColor_,
  const Color &whiteColor_,const bool invert_)
{
  PixelInfo
    black,
    white;

  modifyImage();
  black=static_cast<PixelInfo>(blackColor_);
  white=static_cast<PixelInfo>(whiteColor_);
  GetPPException;
  (void) LevelImageColors(image(),&black,&white,invert_ == true ?
    MagickTrue : MagickFalse,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::levelColorsChannel(const ChannelType channel_,
  const Color &blackColor_,const Color &whiteColor_,const bool invert_)
{
  PixelInfo
    black,
    white;

  modifyImage();
  black=static_cast<PixelInfo>(blackColor_);
  white=static_cast<PixelInfo>(whiteColor_);
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  (void) LevelImageColors(image(),&black,&white,invert_ == true ?
    MagickTrue : MagickFalse,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Inverse of level(): maps the full range into [blackPoint_,whitePoint_].
void Magick::Image::levelize(const double blackPoint_,const double whitePoint_,
  const double gamma_)
{
  modifyImage();
  GetPPException;
  (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::levelizeChannel(const ChannelType channel_,
  const double blackPoint_,const double whitePoint_,const double gamma_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Linear contrast stretch by saturating the given fractions of pixels.
void Magick::Image::linearStretch(const double blackPoint_,
  const double whitePoint_)
{
  modifyImage();
  GetPPException;
  LinearStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo);
  ThrowImageException;
}

// Content-aware (seam carving) rescale to the given geometry.
void Magick::Image::liquidRescale(const Geometry &geometry_)
{
  MagickCore::Image
    *newImage;

  size_t
    height=rows(),
    width=columns();

  ssize_t
    x=0,
    y=0;

  ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width,
    &height);
  GetPPException;
  newImage=LiquidRescaleImage(image(),width,height,x,y,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Locally-adaptive contrast enhancement.
void Magick::Image::localContrast(const double radius_,const double strength_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=LocalContrastImage(constImage(),radius_,strength_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Double the image size (pixel magnification).
void Magick::Image::magnify(void)
{
  MagickCore::Image
    *newImage;

  GetPPException;
newImage=MagnifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::map(const Image &mapImage_,const bool dither_) { modifyImage(); GetPPException; options()->quantizeDither(dither_); RemapImage(options()->quantizeInfo(),image(),mapImage_.constImage(), exceptionInfo); ThrowImageException; } void Magick::Image::meanShift(const size_t width_,const size_t height_, const double color_distance_) { MagickCore::Image *newImage; GetPPException; newImage=MeanShiftImage(constImage(),width_,height_,color_distance_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::medianFilter(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(image(),MedianStatistic,(size_t) radius_, (size_t) radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::minify(void) { MagickCore::Image *newImage; GetPPException; newImage=MinifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::modulate(const double brightness_,const double saturation_, const double hue_) { char modulate[MagickPathExtent + 1]; FormatLocaleString(modulate,MagickPathExtent,"%3.6f,%3.6f,%3.6f",brightness_, saturation_,hue_); modifyImage(); GetPPException; ModulateImage(image(),modulate,exceptionInfo); ThrowImageException; } Magick::ImageMoments Magick::Image::moments(void) const { return(ImageMoments(*this)); } void Magick::Image::morphology(const MorphologyMethod method_, const std::string kernel_,const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; GetPPException; kernel=AcquireKernelInfo(kernel_.c_str(),exceptionInfo); if (kernel == (KernelInfo *) NULL) throwExceptionExplicit(MagickCore::OptionError,"Unable to parse kernel."); newImage=MorphologyImage(constImage(),method_,iterations_,kernel, exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void 
// Apply a morphology method using a built-in kernel type plus an optional
// argument string.
Magick::Image::morphology(const MorphologyMethod method_,
  const KernelInfoType kernel_,const std::string arguments_,
  const ssize_t iterations_)
{
  const char
    *option;

  std::string
    kernel;

  option=CommandOptionToMnemonic(MagickKernelOptions,kernel_);
  if (option == (const char *)NULL)
    {
      throwExceptionExplicit(MagickCore::OptionError,
        "Unable to determine kernel type.");
      return;
    }
  kernel=std::string(option);
  if (!arguments_.empty())
    kernel+=":"+arguments_;

  // Delegate to the string-kernel overload.
  morphology(method_,kernel,iterations_);
}

// Channel-restricted morphology with a string-described kernel.
void Magick::Image::morphologyChannel(const ChannelType channel_,
  const MorphologyMethod method_,const std::string kernel_,
  const ssize_t iterations_)
{
  KernelInfo
    *kernel;

  MagickCore::Image
    *newImage;

  GetPPException;
  kernel=AcquireKernelInfo(kernel_.c_str(),exceptionInfo);
  if (kernel == (KernelInfo *)NULL)
    {
      throwExceptionExplicit(MagickCore::OptionError,
        "Unable to parse kernel.");
      return;
    }
  GetAndSetPPChannelMask(channel_);
  newImage=MorphologyImage(constImage(),method_,iterations_,kernel,
    exceptionInfo);
  RestorePPChannelMask;
  replaceImage(newImage);
  kernel=DestroyKernelInfo(kernel);
  ThrowImageException;
}

// Channel-restricted morphology with a built-in kernel type.
void Magick::Image::morphologyChannel(const ChannelType channel_,
  const MorphologyMethod method_,const KernelInfoType kernel_,
  const std::string arguments_,const ssize_t iterations_)
{
  const char
    *option;

  std::string
    kernel;

  option=CommandOptionToMnemonic(MagickKernelOptions,kernel_);
  if (option == (const char *)NULL)
    {
      throwExceptionExplicit(MagickCore::OptionError,
        "Unable to determine kernel type.");
      return;
    }
  kernel=std::string(option);
  if (!arguments_.empty())
    kernel+=":"+arguments_;

  morphologyChannel(channel_,method_,kernel,iterations_);
}

// Simulate motion blur along angle_ with a Gaussian of radius_/sigma_.
void Magick::Image::motionBlur(const double radius_,const double sigma_,
  const double angle_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=MotionBlurImage(constImage(),radius_,sigma_,angle_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Negate the image colors (grayscale_=true negates only grayscale pixels).
void Magick::Image::negate(const bool grayscale_)
{
  modifyImage();
  GetPPException;
  NegateImage(image(),(MagickBooleanType) grayscale_,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::negateChannel(const ChannelType channel_,
  const bool grayscale_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  NegateImage(image(),(MagickBooleanType) grayscale_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Normalize to span the full range of color values.
void Magick::Image::normalize(void)
{
  modifyImage();
  GetPPException;
  NormalizeImage(image(),exceptionInfo);
  ThrowImageException;
}

// Oil-paint special effect.
void Magick::Image::oilPaint(const double radius_,const double sigma_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=OilPaintImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Change all pixels matching opaqueColor_ to penColor_ (or the complement
// with invert_=true).
void Magick::Image::opaque(const Color &opaqueColor_,const Color &penColor_,
  const bool invert_)
{
  std::string
    opaqueColor,
    penColor;

  PixelInfo
    opaque,
    pen;

  if (!opaqueColor_.isValid())
    throwExceptionExplicit(MagickCore::OptionError,
      "Opaque color argument is invalid");

  if (!penColor_.isValid())
    throwExceptionExplicit(MagickCore::OptionError,
      "Pen color argument is invalid");

  modifyImage();
  opaqueColor=opaqueColor_;
  penColor=penColor_;

  GetPPException;
  (void) QueryColorCompliance(opaqueColor.c_str(),AllCompliance,&opaque,
    exceptionInfo);
  (void) QueryColorCompliance(penColor.c_str(),AllCompliance,&pen,
    exceptionInfo);
  OpaquePaintImage(image(),&opaque,&pen,invert_ ?
MagickTrue : MagickFalse, exceptionInfo);
  ThrowImageException;
}

// Apply an ordered-dither threshold map (e.g. "o4x4").
void Magick::Image::orderedDither(std::string thresholdMap_)
{
  modifyImage();
  GetPPException;
  (void) OrderedDitherImage(image(),thresholdMap_.c_str(),exceptionInfo);
  ThrowImageException;
}

void Magick::Image::orderedDitherChannel(const ChannelType channel_,
  std::string thresholdMap_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  (void)OrderedDitherImage(image(),thresholdMap_.c_str(),exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Raise values below epsilon_ to epsilon_ (avoids divide-by-zero in later
// operations).
void Magick::Image::perceptible(const double epsilon_)
{
  modifyImage();
  GetPPException;
  PerceptibleImage(image(),epsilon_,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::perceptibleChannel(const ChannelType channel_,
  const double epsilon_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  PerceptibleImage(image(),epsilon_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Perceptual hash of the image (for similarity comparison).
Magick::ImagePerceptualHash Magick::Image::perceptualHash() const
{
  return(ImagePerceptualHash(*this));
}

// Ping (read attributes only, not pixel data) from a file or pseudo-image
// specification.
void Magick::Image::ping(const std::string &imageSpec_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  options()->fileName(imageSpec_);
  newImage=PingImage(imageInfo(),exceptionInfo);
  read(newImage,exceptionInfo);
}

// Ping from an in-memory blob.
void Magick::Image::ping(const Blob& blob_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=PingBlob(imageInfo(),blob_.data(),blob_.length(),exceptionInfo);
  read(newImage,exceptionInfo);
}

// Set the color of the pixel at (x_,y_).
void Magick::Image::pixelColor(const ssize_t x_,const ssize_t y_,
  const Color &color_)
{
  PixelInfo
    packet;

  Quantum
    *pixel;

  // Test arguments to ensure they are within the image.
if (y_ > (ssize_t) rows() || x_ > (ssize_t) columns()) throwExceptionExplicit(MagickCore::OptionError, "Access outside of image boundary"); modifyImage(); // Set image to DirectClass classType(DirectClass ); // Get pixel view Pixels pixels(*this); // Set pixel value pixel=pixels.get(x_, y_, 1, 1 ); packet=color_; MagickCore::SetPixelViaPixelInfo(constImage(),&packet,pixel); // Tell ImageMagick that pixels have been updated pixels.sync(); } Magick::Color Magick::Image::pixelColor(const ssize_t x_, const ssize_t y_) const { const Quantum *pixel; pixel=getConstPixels(x_,y_,1,1); if (pixel) { PixelInfo packet; MagickCore::GetPixelInfoPixel(constImage(),pixel,&packet); return(Color(packet)); } return(Color()); // invalid } void Magick::Image::polaroid(const std::string &caption_,const double angle_, const PixelInterpolateMethod method_) { MagickCore::Image *newImage; GetPPException; newImage=PolaroidImage(constImage(),options()->drawInfo(),caption_.c_str(), angle_,method_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::posterize(const size_t levels_,const DitherMethod method_) { modifyImage(); GetPPException; PosterizeImage(image(),levels_,method_,exceptionInfo); ThrowImageException; } void Magick::Image::posterizeChannel(const ChannelType channel_, const size_t levels_,const DitherMethod method_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); PosterizeImage(image(),levels_,method_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::process(std::string name_,const ssize_t argc, const char **argv) { modifyImage(); GetPPException; (void) InvokeDynamicImageFilter(name_.c_str(),&image(),argc,argv, exceptionInfo); ThrowImageException; } void Magick::Image::profile(const std::string name_, const Magick::Blob &profile_) { modifyImage(); GetPPException; (void) ProfileImage(image(),name_.c_str(),(unsigned char *)profile_.data(), profile_.length(),exceptionInfo); ThrowImageException; } 
// Retrieve a named profile; returns an empty Blob when not present.
Magick::Blob Magick::Image::profile(const std::string name_) const
{
  const StringInfo
    *profile;

  profile=GetImageProfile(constImage(),name_.c_str());

  if (profile == (StringInfo *) NULL)
    return(Blob());
  return(Blob((void*) GetStringInfoDatum(profile),GetStringInfoLength(
    profile)));
}

// Quantize colors using the current quantization settings; optionally
// measure the quantization error.
void Magick::Image::quantize(const bool measureError_)
{
  modifyImage();

  if (measureError_)
    options()->quantizeInfo()->measure_error=MagickTrue;
  else
    options()->quantizeInfo()->measure_error=MagickFalse;

  GetPPException;
  QuantizeImage(options()->quantizeInfo(),image(),exceptionInfo);
  ThrowImageException;
}

// Lighten or darken edges to give a 3-D raised or lowered effect.
void Magick::Image::raise(const Geometry &geometry_,const bool raisedFlag_)
{
  RectangleInfo
    raiseInfo=geometry_;

  GetPPException;
  modifyImage();
  RaiseImage(image(),&raiseInfo,raisedFlag_ == true ? MagickTrue : MagickFalse,
    exceptionInfo);
  ThrowImageException;
}

// Random threshold between low_ and high_.
void Magick::Image::randomThreshold(const double low_,const double high_)
{
  GetPPException;
  (void) RandomThresholdImage(image(),low_,high_,exceptionInfo);
  ThrowImageException;
}

void Magick::Image::randomThresholdChannel(const ChannelType channel_,
  const double low_,const double high_)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  (void) RandomThresholdImage(image(),low_,high_,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Read an image from an in-memory blob.
void Magick::Image::read(const Blob &blob_)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=BlobToImage(imageInfo(),static_cast<const void *>(blob_.data()),
    blob_.length(),exceptionInfo);
  read(newImage,exceptionInfo);
}

// Read from a blob with a size hint.
void Magick::Image::read(const Blob &blob_,const Geometry &size_)
{
  size(size_);
  read(blob_);
}

// Read from a blob with size and depth hints.
void Magick::Image::read(const Blob &blob_,const Geometry &size_,
  const size_t depth_)
{
  size(size_);
  depth(depth_);
  read(blob_);
}

// Read from a blob with size, depth and an explicit format.
void Magick::Image::read(const Blob &blob_,const Geometry &size_,
  const size_t depth_,const std::string &magick_)
{
  size(size_);
  depth(depth_);
  magick(magick_);
  // Set explicit image format
  fileName(magick_ + ':');
  read(blob_);
}
void Magick::Image::read(const Blob &blob_,const Geometry &size_, const std::string &magick_) { size(size_); magick(magick_); // Set explicit image format fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Geometry &size_,const std::string &imageSpec_) { size(size_); read(imageSpec_); } void Magick::Image::read(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) { MagickCore::Image *newImage; GetPPException; newImage=ConstituteImage(width_,height_,map_.c_str(),type_, pixels_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::read(const std::string &imageSpec_) { MagickCore::Image *newImage; GetPPException; options()->fileName(imageSpec_); newImage=ReadImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::readMask(const Magick::Image &mask_) { mask(mask_,ReadPixelMask); } Magick::Image Magick::Image::readMask(void) const { return(mask(ReadPixelMask)); } void Magick::Image::readPixels(const Magick::QuantumType quantum_, const unsigned char *source_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; ImportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,source_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::reduceNoise(void) { reduceNoise(3); } void Magick::Image::reduceNoise(const size_t order_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),NonpeakStatistic,order_, order_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::repage() { modifyImage(); options()->page(Geometry()); image()->page.width = 0; image()->page.height = 0; image()->page.x = 0; image()->page.y = 0; } void Magick::Image::resample(const Point &density_) { MagickCore::Image *newImage; GetPPException; 
newImage=ResampleImage(constImage(),density_.x(),density_.y(), image()->filter,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::resize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; // Calculate new size. This code should be supported using binary arguments // in the ImageMagick library. ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const Geometry &roll_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),roll_.xOff(),roll_.yOff(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const size_t columns_,const size_t rows_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),static_cast<ssize_t>(columns_), static_cast<ssize_t>(rows_),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotate(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=RotateImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlur(const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlurChannel(const ChannelType channel_, const double angle_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::sample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; 
ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=SampleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::scale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ScaleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::segment(const double clusterThreshold_, const double smoothingThreshold_) { modifyImage(); GetPPException; SegmentImage(image(),options()->quantizeColorSpace(), (MagickBooleanType) options()->verbose(),clusterThreshold_, smoothingThreshold_,exceptionInfo); SyncImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::selectiveBlur(const double radius_,const double sigma_, const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::selectiveBlurChannel(const ChannelType channel_, const double radius_,const double sigma_,const double threshold_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::separate(const ChannelType channel_) const { MagickCore::Image *image; GetPPException; image=SeparateImage(constImage(),channel_,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::sepiaTone(const double threshold_) { MagickCore::Image *newImage; GetPPException; 
newImage=SepiaToneImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } bool Magick::Image::setColorMetric(const Image &reference_) { bool status; Image ref=reference_; GetPPException; modifyImage(); status=static_cast<bool>(SetImageColorMetric(image(),ref.constImage(), exceptionInfo)); ThrowImageException; return(status); } Magick::Quantum *Magick::Image::setPixels(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_) { Quantum *result; modifyImage(); GetPPException; result=QueueAuthenticPixels(image(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::shade(const double azimuth_,const double elevation_, const bool colorShading_) { MagickCore::Image *newImage; GetPPException; newImage=ShadeImage(constImage(),colorShading_ == true ? MagickTrue : MagickFalse,azimuth_,elevation_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shadow(const double percent_opacity_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=ShadowImage(constImage(),percent_opacity_, sigma_,x_, y_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpenChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::shave(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo shaveInfo=geometry_; GetPPException; newImage=ShaveImage(constImage(),&shaveInfo,exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::shear(const double xShearAngle_,const double yShearAngle_) { MagickCore::Image *newImage; GetPPException; newImage=ShearImage(constImage(),xShearAngle_,yShearAngle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sigmoidalContrast(const bool sharpen_, const double contrast,const double midpoint) { modifyImage(); GetPPException; (void) SigmoidalContrastImage(image(),(MagickBooleanType) sharpen_,contrast, midpoint,exceptionInfo); ThrowImageException; } std::string Magick::Image::signature(const bool force_) const { return(_imgRef->signature(force_)); } void Magick::Image::sketch(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=SketchImage(constImage(),radius_,sigma_,angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::solarize(const double factor_) { modifyImage(); GetPPException; SolarizeImage(image(),factor_,exceptionInfo); ThrowImageException; } void Magick::Image::sparseColor(const ChannelType channel_, const SparseColorMethod method_,const size_t numberArguments_, const double *arguments_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SparseColorImage(constImage(),method_,numberArguments_,arguments_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo spliceInfo=geometry_; GetPPException; newImage=SpliceImage(constImage(),&spliceInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); splice(geometry_); } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { backgroundColor(backgroundColor_); 
image()->gravity=gravity_; splice(geometry_); } void Magick::Image::spread(const double amount_) { MagickCore::Image *newImage; GetPPException; newImage=SpreadImage(constImage(),image()->interpolate,amount_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::ImageStatistics Magick::Image::statistics() const { return(ImageStatistics(*this)); } void Magick::Image::stegano(const Image &watermark_) { MagickCore::Image *newImage; GetPPException; newImage=SteganoImage(constImage(),watermark_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::stereo(const Image &rightImage_) { MagickCore::Image *newImage; GetPPException; newImage=StereoImage(constImage(),rightImage_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::strip(void) { modifyImage(); GetPPException; StripImage(image(),exceptionInfo); ThrowImageException; } Magick::Image Magick::Image::subImageSearch(const Image &reference_, const MetricType metric_,Geometry *offset_,double *similarityMetric_, const double similarityThreshold) { MagickCore::Image *newImage; RectangleInfo offset; GetPPException; newImage=SimilarityImage(image(),reference_.constImage(),metric_, similarityThreshold,&offset,similarityMetric_,exceptionInfo); ThrowImageException; if (offset_ != (Geometry *) NULL) *offset_=offset; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::swirl(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=SwirlImage(constImage(),degrees_,image()->interpolate, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::syncPixels(void) { GetPPException; (void) SyncAuthenticPixels(image(),exceptionInfo); ThrowImageException; } void Magick::Image::texture(const Image &texture_) { modifyImage(); GetPPException; TextureImage(image(),texture_.constImage(),exceptionInfo); ThrowImageException; } 
void Magick::Image::threshold(const double threshold_) { modifyImage(); GetPPException; BilevelImage(image(),threshold_,exceptionInfo); ThrowImageException; } void Magick::Image::thumbnail(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ThumbnailImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::tint(const std::string opacity_) { MagickCore::Image *newImage; PixelInfo color; GetPPException; color=static_cast<PixelInfo>(constOptions()->fillColor()); newImage=TintImage(constImage(),opacity_.c_str(),&color,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transformOrigin(const double x_,const double y_) { modifyImage(); options()->transformOrigin(x_,y_); } void Magick::Image::transformReset(void) { modifyImage(); options()->transformReset(); } void Magick::Image::transformScale(const double sx_,const double sy_) { modifyImage(); options()->transformScale(sx_,sy_); } void Magick::Image::transparent(const Color &color_,const bool inverse_) { PixelInfo target; std::string color; if (!color_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Color argument is invalid"); color=color_; GetPPException; (void) QueryColorCompliance(color.c_str(),AllCompliance,&target, exceptionInfo); modifyImage(); TransparentPaintImage(image(),&target,TransparentAlpha, inverse_ == true ? 
MagickTrue : MagickFalse,exceptionInfo); ThrowImageException; } void Magick::Image::transparentChroma(const Color &colorLow_, const Color &colorHigh_) { std::string colorHigh, colorLow; PixelInfo targetHigh, targetLow; if (!colorLow_.isValid() || !colorHigh_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Color argument is invalid"); colorLow=colorLow_; colorHigh=colorHigh_; GetPPException; (void) QueryColorCompliance(colorLow.c_str(),AllCompliance,&targetLow, exceptionInfo); (void) QueryColorCompliance(colorHigh.c_str(),AllCompliance,&targetHigh, exceptionInfo); modifyImage(); TransparentPaintImageChroma(image(),&targetLow,&targetHigh,TransparentAlpha, MagickFalse,exceptionInfo); ThrowImageException; } void Magick::Image::transpose(void) { MagickCore::Image *newImage; GetPPException; newImage=TransposeImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transverse(void) { MagickCore::Image *newImage; GetPPException; newImage=TransverseImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::trim(void) { MagickCore::Image *newImage; GetPPException; newImage=TrimImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::uniqueColors(void) const { MagickCore::Image *image; GetPPException; image=UniqueImageColors(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::unsharpmask(const double radius_,const double sigma_, const double amount_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::unsharpmaskChannel(const ChannelType channel_, const double radius_,const double sigma_,const double amount_, const double threshold_) 
{ MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::vignette(const double radius_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=VignetteImage(constImage(),radius_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::wave(const double amplitude_,const double wavelength_) { MagickCore::Image *newImage; GetPPException; newImage=WaveImage(constImage(),amplitude_,wavelength_,image()->interpolate, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::waveletDenoise(const double threshold_, const double softness_) { MagickCore::Image *newImage; GetPPException; newImage=WaveletDenoiseImage(constImage(),threshold_,softness_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::whiteThreshold(const std::string &threshold_) { modifyImage(); GetPPException; WhiteThresholdImage(image(),threshold_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::whiteThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); WhiteThresholdImage(image(),threshold_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::write(Blob *blob_) { size_t length=0; void *data; modifyImage(); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(Blob *blob_,const std::string &magick_) { size_t length=0; void *data; modifyImage(); magick(magick_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) 
blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(Blob *blob_,const std::string &magick_, const size_t depth_) { size_t length=0; void *data; modifyImage(); magick(magick_); depth(depth_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const std::string &map_, const StorageType type_,void *pixels_) { GetPPException; ExportImagePixels(image(),x_,y_,columns_,rows_,map_.c_str(),type_,pixels_, exceptionInfo); ThrowImageException; } void Magick::Image::write(const std::string &imageSpec_) { modifyImage(); fileName(imageSpec_); GetPPException; WriteImage(constImageInfo(),image(),exceptionInfo); ThrowImageException; } void Magick::Image::writeMask(const Magick::Image &mask_) { mask(mask_,WritePixelMask); } Magick::Image Magick::Image::writeMask(void) const { return(mask(WritePixelMask)); } void Magick::Image::writePixels(const Magick::QuantumType quantum_, unsigned char *destination_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; ExportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,destination_, exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::zoom(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image::Image(MagickCore::Image *image_) : _imgRef(new ImageRef(image_)) { } MagickCore::Image *&Magick::Image::image(void) { return(_imgRef->image()); } const 
MagickCore::Image *Magick::Image::constImage(void) const { return(_imgRef->image()); } MagickCore::ImageInfo *Magick::Image::imageInfo(void) { return(_imgRef->options()->imageInfo()); } const MagickCore::ImageInfo *Magick::Image::constImageInfo(void) const { return(_imgRef->options()->imageInfo()); } Magick::Options *Magick::Image::options(void) { return(_imgRef->options()); } const Magick::Options *Magick::Image::constOptions(void) const { return(_imgRef->options()); } MagickCore::QuantizeInfo *Magick::Image::quantizeInfo(void) { return(_imgRef->options()->quantizeInfo()); } const MagickCore::QuantizeInfo *Magick::Image::constQuantizeInfo(void) const { return(_imgRef->options()->quantizeInfo()); } void Magick::Image::modifyImage(void) { if (!_imgRef->isShared()) return; GetPPException; replaceImage(CloneImage(image(),0,0,MagickTrue,exceptionInfo)); ThrowImageException; } MagickCore::Image *Magick::Image::replaceImage(MagickCore::Image *replacement_) { MagickCore::Image *image; if (replacement_) image=replacement_; else { GetPPException; image=AcquireImage(constImageInfo(),exceptionInfo); ThrowImageException; } _imgRef=ImageRef::replaceImage(_imgRef,image); return(image); } void Magick::Image::read(MagickCore::Image *image, MagickCore::ExceptionInfo *exceptionInfo) { // Ensure that multiple image frames were not read. 
if (image != (MagickCore::Image *) NULL && image->next != (MagickCore::Image *) NULL) { MagickCore::Image *next; // Destroy any extra image frames next=image->next; image->next=(MagickCore::Image *) NULL; next->previous=(MagickCore::Image *) NULL; DestroyImageList(next); } replaceImage(image); if (exceptionInfo->severity == MagickCore::UndefinedException && image == (MagickCore::Image *) NULL) { (void) MagickCore::DestroyExceptionInfo(exceptionInfo); if (!quiet()) throwExceptionExplicit(MagickCore::ImageWarning, "No image was loaded."); return; } ThrowImageException; } void Magick::Image::floodFill(const ssize_t x_,const ssize_t y_, const Magick::Image *fillPattern_,const Magick::Color &fill_, const MagickCore::PixelInfo *target_,const bool invert_) { Magick::Color fillColor; MagickCore::Image *fillPattern; // Set drawing fill pattern or fill color fillColor=options()->fillColor(); fillPattern=(MagickCore::Image *)NULL; if (options()->fillPattern() != (MagickCore::Image *)NULL) { GetPPException; fillPattern=CloneImage(options()->fillPattern(),0,0,MagickTrue, exceptionInfo); ThrowImageException; } if (fillPattern_ == (Magick::Image *)NULL) { options()->fillPattern((MagickCore::Image *)NULL); options()->fillColor(fill_); } else options()->fillPattern(fillPattern_->constImage()); GetPPException; (void) FloodfillPaintImage(image(),options()->drawInfo(), target_,static_cast<ssize_t>(x_),static_cast<ssize_t>(y_), (MagickBooleanType) invert_,exceptionInfo); options()->fillColor(fillColor); options()->fillPattern(fillPattern); ThrowImageException; } void Magick::Image::mask(const Magick::Image &mask_,const PixelMask type) { modifyImage(); GetPPException; if (mask_.isValid()) SetImageMask(image(),type,mask_.constImage(),exceptionInfo); else SetImageMask(image(),type,(MagickCore::Image *) NULL, exceptionInfo); ThrowImageException; } Magick::Image Magick::Image::mask(const PixelMask type) const { MagickCore::Image *image; GetPPException; image = 
GetImageMask(constImage(),type,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); }
./CrossVul/dataset_final_sorted/CWE-416/cpp/good_2968_0
crossvul-cpp_data_bad_4222_0
/* * Copyright (c) 2009, The MilkyTracker Team. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name of the <ORGANIZATION> nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PlayerGeneric.cpp * MilkyPlay * * PlayerGeneric is a wrapper that allocates a suiting type of player * for a module while providing the same player interfaces. 
* Currently there are three types of players: PlayerFAR, PlayerSTD and PlayerIT * */ #include "PlayerGeneric.h" #include "MasterMixer.h" #include "XModule.h" #include "AudioDriver_WAVWriter.h" #include "AudioDriverManager.h" #include "PlayerBase.h" #include "PlayerSTD.h" #ifndef MILKYTRACKER #include "PlayerIT.h" #include "PlayerFAR.h" #endif #undef __VERBOSE__ class MixerNotificationListener : public MasterMixer::MasterMixerNotificationListener { private: class PlayerGeneric& player; public: MixerNotificationListener(PlayerGeneric& player) : player(player) { } virtual void masterMixerNotification(MasterMixer::MasterMixerNotifications notification) { player.adjustSettings(); } }; void PlayerGeneric::adjustSettings() { mp_uint32 bufferSize = mixer->getBufferSize(); mp_uint32 sampleRate = mixer->getSampleRate(); this->bufferSize = bufferSize; this->frequency = sampleRate; if (player) { player->setBufferSize(bufferSize); player->adjustFrequency(sampleRate); } } PlayerBase::PlayerTypes PlayerGeneric::getPreferredPlayerType(XModule* module) { if (module == NULL) return PlayerBase::PlayerType_INVALID; switch (module->getType()) { case XModule::ModuleType_669: case XModule::ModuleType_FAR: #ifndef MILKYTRACKER return PlayerBase::PlayerType_FAR; break; #endif case XModule::ModuleType_IT: #ifndef MILKYTRACKER return PlayerBase::PlayerType_IT; break; #endif case XModule::ModuleType_UNKNOWN: // just assume our standard player can handle this //case XModule::ModuleType_669: case XModule::ModuleType_AMF: case XModule::ModuleType_AMS: case XModule::ModuleType_CBA: case XModule::ModuleType_DBM: case XModule::ModuleType_DIGI: case XModule::ModuleType_DSM: case XModule::ModuleType_DSm: case XModule::ModuleType_DTM_1: case XModule::ModuleType_DTM_2: case XModule::ModuleType_GDM: case XModule::ModuleType_GMC: case XModule::ModuleType_IMF: case XModule::ModuleType_MDL: case XModule::ModuleType_MOD: case XModule::ModuleType_MTM: case XModule::ModuleType_MXM: case 
XModule::ModuleType_OKT: case XModule::ModuleType_PLM: case XModule::ModuleType_PSM: case XModule::ModuleType_PTM: case XModule::ModuleType_S3M: case XModule::ModuleType_STM: case XModule::ModuleType_SFX: case XModule::ModuleType_UNI: case XModule::ModuleType_ULT: case XModule::ModuleType_XM: case XModule::ModuleType_NONE: return PlayerBase::PlayerType_Generic; break; default: return PlayerBase::PlayerType_INVALID; } } PlayerBase* PlayerGeneric::getPreferredPlayer(XModule* module) const { switch (getPreferredPlayerType(module)) { #ifndef MILKYTRACKER case PlayerBase::PlayerType_FAR: return new PlayerFAR(frequency); case PlayerBase::PlayerType_IT: return new PlayerIT(frequency); #endif case PlayerBase::PlayerType_Generic: return new PlayerSTD(frequency); default: return NULL; } } PlayerGeneric::PlayerGeneric(mp_sint32 frequency, AudioDriverInterface* audioDriver/* = NULL*/) : mixer(NULL), player(NULL), frequency(frequency), audioDriver(audioDriver), audioDriverName(NULL) { listener = new MixerNotificationListener(*this); bufferSize = 0; sampleShift = 0; resamplerType = MIXER_NORMAL; idle = false; playOneRowOnly = false; paused = false; repeat = false; resetOnStopFlag = false; autoAdjustPeak = false; disableMixing = false; allowFilters = false; #ifdef __FORCEPOWEROFTWOBUFFERSIZE__ compensateBufferFlag = true; #else compensateBufferFlag = false; #endif masterVolume = panningSeparation = numMaxVirChannels = 256; resetMainVolumeOnStartPlayFlag = true; playMode = PlayMode_Auto; // Special playmode settings options[PlayModeOptionPanning8xx] = true; options[PlayModeOptionPanningE8x] = false; options[PlayModeOptionForcePTPitchLimit] = true; AudioDriverManager audioDriverManager; const char* defaultName = audioDriverManager.getPreferredAudioDriver()->getDriverID(); if (defaultName) { audioDriverName = new char[strlen(defaultName)+1]; strcpy(audioDriverName, defaultName); } } PlayerGeneric::~PlayerGeneric() { if (mixer) delete mixer; if (player) { if (mixer->isActive() && 
!mixer->isDeviceRemoved(player)) mixer->removeDevice(player); delete player; } delete[] audioDriverName; delete listener; } // -- wrapping mixer specific stuff ---------------------- void PlayerGeneric::setResamplerType(ResamplerTypes type) { resamplerType = type; if (player) player->setResamplerType(type); } void PlayerGeneric::setResamplerType(bool interpolation, bool ramping) { if (interpolation) { if (ramping) resamplerType = MIXER_LERPING_RAMPING; else resamplerType = MIXER_LERPING; } else { if (ramping) resamplerType = MIXER_NORMAL_RAMPING; else resamplerType = MIXER_NORMAL; } if (player) player->setResamplerType(resamplerType); } ChannelMixer::ResamplerTypes PlayerGeneric::getResamplerType() const { if (player) return player->getResamplerType(); return resamplerType; } void PlayerGeneric::setSampleShift(mp_sint32 shift) { sampleShift = shift; if (mixer) mixer->setSampleShift(shift); } mp_sint32 PlayerGeneric::getSampleShift() const { if (mixer) return mixer->getSampleShift(); return sampleShift; } void PlayerGeneric::setPeakAutoAdjust(bool b) { this->autoAdjustPeak = b; } mp_sint32 PlayerGeneric::adjustFrequency(mp_uint32 frequency) { this->frequency = frequency; mp_sint32 res = MP_OK; if (mixer) res = mixer->setSampleRate(frequency); return res; } mp_sint32 PlayerGeneric::getMixFrequency() const { if (player) return player->getMixFrequency(); return frequency; } mp_sint32 PlayerGeneric::beatPacketsToBufferSize(mp_uint32 numBeats) { return ChannelMixer::beatPacketsToBufferSize(getMixFrequency(), numBeats); } mp_sint32 PlayerGeneric::adjustBufferSize(mp_uint32 numBeats) { return setBufferSize(beatPacketsToBufferSize(numBeats)); } mp_sint32 PlayerGeneric::setBufferSize(mp_uint32 bufferSize) { mp_sint32 res = 0; this->bufferSize = bufferSize; if (mixer) { // If we're told to compensate the samples until we // we reached 2^n buffer sizes if (compensateBufferFlag) { for (mp_uint32 i = 0; i < 16; i++) { if ((unsigned)(1 << i) >= (unsigned)bufferSize) { bufferSize 
= 1 << i; break; } } } res = mixer->setBufferSize(bufferSize); } return res; } mp_sint32 PlayerGeneric::setPowerOfTwoCompensationFlag(bool b) { if (mixer && compensateBufferFlag != b) { compensateBufferFlag = b; setBufferSize(bufferSize); } return MP_OK; } bool PlayerGeneric::getPowerOfTwoCompensationFlag() const { return compensateBufferFlag; } const char* PlayerGeneric::getCurrentAudioDriverName() const { if (mixer) return mixer->getCurrentAudioDriverName(); return audioDriverName; } bool PlayerGeneric::setCurrentAudioDriverByName(const char* name) { if (name == NULL) return false; if (mixer) { bool res = mixer->setCurrentAudioDriverByName(name); if (audioDriverName) delete[] audioDriverName; const char* curDrvName = getCurrentAudioDriverName(); ASSERT(curDrvName); audioDriverName = new char[strlen(curDrvName)+1]; strcpy(audioDriverName, curDrvName); return res; } AudioDriverManager audioDriverManager; if (audioDriverManager.getAudioDriverByName(name)) { if (audioDriverName) delete[] audioDriverName; audioDriverName = new char[strlen(name)+1]; strcpy(audioDriverName, name); return true; } return false; } bool PlayerGeneric::isInitialized() const { if (mixer) return mixer->isInitialized(); return false; } bool PlayerGeneric::isPlaying() const { if (mixer) return mixer->isPlaying(); return false; } mp_int64 PlayerGeneric::getSampleCounter() const { if (player) return player->getSampleCounter(); return 0; } void PlayerGeneric::resetSampleCounter() { if (player) player->resetSampleCounter(); } mp_sint32 PlayerGeneric::getCurrentSamplePosition() const { if (mixer && mixer->getAudioDriver()) return mixer->getAudioDriver()->getBufferPos(); return 0; } mp_sint32 PlayerGeneric::getCurrentBeatIndex() { if (player) return player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return 0; } mp_sint32 PlayerGeneric::getCurrentSample(mp_sint32 position, mp_sint32 channel) { if (mixer) return mixer->getCurrentSample(position, channel); return 0; } mp_sint32 
// NOTE(review): leading fragment — completes a definition whose return type
// ("mp_sint32") sits at the end of the previous chunk line; code verbatim.
PlayerGeneric::getCurrentSamplePeak(mp_sint32 position, mp_sint32 channel)
{
	if (mixer)
		return mixer->getCurrentSamplePeak(position, channel);
	return 0;
}

void PlayerGeneric::resetChannels()
{
	if (player)
		player->resetChannelsFull();
}

mp_sint32 PlayerGeneric::getNumAllocatedChannels() const
{
	if (player)
		return player->getNumAllocatedChannels();
	return 0;
}

mp_sint32 PlayerGeneric::getNumActiveChannels() const
{
	if (player)
		return player->getNumActiveChannels();
	return 0;
}

// -- wrapping player specific stuff ----------------------

void PlayerGeneric::setPlayMode(PlayModes mode)
{
	playMode = mode;
	if (player)
		player->setPlayMode(mode);
}

PlayerGeneric::PlayModes PlayerGeneric::getPlayMode() const
{
	if (player)
		return player->getPlayMode();
	return playMode;
}

// Toggle a play-mode option; mirrored into the active player when present.
void PlayerGeneric::enable(PlayModeOptions option, bool b)
{
	ASSERT(option>=PlayModeOptionFirst && option<PlayModeOptionLast);
	options[option] = b;
	if (player)
		player->enable(option, b);
}

bool PlayerGeneric::isEnabled(PlayModeOptions option) const
{
	ASSERT(option>=PlayModeOptionFirst && option<PlayModeOptionLast);
	if (!player)
		return options[option];
	else
		return player->isEnabled(option);
}

// Restart the current song at the given position/row.
void PlayerGeneric::restart(mp_uint32 startPosition/* = 0*/, mp_uint32 startRow/* = 0*/, bool resetMixer/* = true*/, const mp_ubyte* customPanningTable/* = NULL*/, bool playOneRowOnly/* = false*/)
{
	if (player)
		player->restart(startPosition, startRow, resetMixer, customPanningTable, playOneRowOnly);
}

void PlayerGeneric::reset()
{
	if (player)
		player->reset();
}

void PlayerGeneric::resetAllSpeed()
{
	if (player)
		player->resetAllSpeed();
}

// Start (or restart) playback of a module: lazily creates the master mixer,
// swaps in a player engine matching the module type, propagates all cached
// settings to a freshly created player, then attaches it to the mixer.
mp_sint32 PlayerGeneric::startPlaying(XModule* module, bool repeat/* = false*/, mp_uint32 startPosition/* = 0*/, mp_uint32 startRow/* = 0*/, mp_sint32 numChannels/* = -1*/, const mp_ubyte* customPanningTable/* = NULL*/, bool idle/* = false*/, mp_sint32 patternIndex/* = -1*/, bool playOneRowOnly/* = false*/)
{
	this->idle = idle;
	this->repeat = repeat;
	this->playOneRowOnly = playOneRowOnly;

	if (mixer == NULL)
	{
		mixer = new MasterMixer(frequency, bufferSize, 1, audioDriver);
		mixer->setMasterMixerNotificationListener(listener);
		mixer->setSampleShift(sampleShift);
		if (audioDriver == NULL)
			mixer->setCurrentAudioDriverByName(audioDriverName);
	}

	// Re-create the player when the module requires a different engine type.
	if (!player || player->getType() != getPreferredPlayerType(module))
	{
		if (player)
		{
			// Detach before deleting so the mixer never mixes a dead device.
			if (!mixer->isDeviceRemoved(player))
				mixer->removeDevice(player);
			delete player;
		}

		player = getPreferredPlayer(module);

		if (player)
		{
			// apply our own "state" to the state of the newly allocated player
			player->resetMainVolumeOnStartPlay(resetMainVolumeOnStartPlayFlag);
			player->resetOnStop(resetOnStopFlag);
			player->setBufferSize(bufferSize);
			player->setResamplerType(resamplerType);
			player->setMasterVolume(masterVolume);
			player->setPanningSeparation(panningSeparation);
			player->setPlayMode(playMode);
			for (mp_sint32 i = PlayModeOptionFirst; i < PlayModeOptionLast; i++)
				player->enable((PlayModeOptions)i, options[i]);
			player->setDisableMixing(disableMixing);
			player->setAllowFilters(allowFilters);
			//if (paused)
			//	player->pausePlaying();
			// adjust number of virtual channels if necessary
			setNumMaxVirChannels(numMaxVirChannels);
		}
	}

	if (player && mixer)
	{
		// Detach while (re)configuring, then re-attach and start the mixer.
		if (!mixer->isDeviceRemoved(player))
			mixer->removeDevice(player);
		player->startPlaying(module, repeat, startPosition, startRow, numChannels, customPanningTable, idle, patternIndex, playOneRowOnly);
		mixer->addDevice(player);
		if (!mixer->isPlaying())
			return mixer->start();
	}
	return MP_OK;
}

void PlayerGeneric::setPatternToPlay(mp_sint32 patternIndex)
{
	if (player)
		player->setPatternToPlay(patternIndex);
}

// Stop the player first, then the mixer; returns the mixer's result.
mp_sint32 PlayerGeneric::stopPlaying()
{
	if (player)
		player->stopPlaying();
	if (mixer)
		return mixer->stop();
	return MP_OK;
}

bool PlayerGeneric::hasSongHalted() const
{
	if (player)
		return player->hasSongHalted();
	return true;
}

void PlayerGeneric::setIdle(bool idle)
{
	this->idle = idle;
	if (player)
		player->setIdle(idle);
}

// NOTE(review): trailing fragment — definition continues on the next chunk line.
bool PlayerGeneric::isIdle() const
{
	if (player)
		return
// NOTE(review): leading fragment — tail of PlayerGeneric::isIdle(), whose
// signature sits on the previous chunk line; code verbatim.
		player->isIdle();
	return idle;
}

void PlayerGeneric::setRepeat(bool repeat)
{
	this->repeat = repeat;
	if (player)
		player->setRepeat(repeat);
}

bool PlayerGeneric::isRepeating() const
{
	if (player)
		return player->isRepeating();
	return repeat;
}

mp_sint32 PlayerGeneric::pausePlaying()
{
	paused = true;
	if (mixer)
		return mixer->pause();
	return MP_OK;
}

// Resume after a pause; also (re)starts the mixer if it was fully stopped.
mp_sint32 PlayerGeneric::resumePlaying()
{
	if (player && !player->isPlaying())
		player->resumePlaying();
	if (mixer && mixer->isPaused())
		return mixer->resume();
	else if (mixer && !mixer->isPlaying())
		return mixer->start();
	return MP_OK;
}

bool PlayerGeneric::isPaused() const
{
	if (mixer)
		return mixer->isPaused();
	return paused;
}

void PlayerGeneric::setDisableMixing(bool b)
{
	disableMixing = b;
	if (player)
		player->setDisableMixing(disableMixing);
}

void PlayerGeneric::setAllowFilters(bool b)
{
	allowFilters = b;
	if (player)
		player->setAllowFilters(allowFilters);
}

bool PlayerGeneric::getAllowFilters() const
{
	if (player)
		return player->getAllowFilters();
	return allowFilters;
}

// volume control
void PlayerGeneric::setMasterVolume(mp_sint32 vol)
{
	masterVolume = vol;
	if (player)
		player->setMasterVolume(vol);
}

mp_sint32 PlayerGeneric::getMasterVolume() const
{
	if (player)
		return player->getMasterVolume();
	return masterVolume;
}

// panning control
void PlayerGeneric::setPanningSeparation(mp_sint32 separation)
{
	panningSeparation = separation;
	if (player)
		player->setPanningSeparation(separation);
}

mp_sint32 PlayerGeneric::getPanningSeparation() const
{
	if (player)
		return player->getPanningSeparation();
	return panningSeparation;
}

// The getters below translate the current audio buffer position into a beat
// index so the reported state matches what is audible, not what was last mixed.
mp_sint32 PlayerGeneric::getSongMainVolume() const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		return player->getSongMainVolume(index);
	}
	return 255;
}

mp_sint32 PlayerGeneric::getRow() const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		return player->getRow(index);
	}
	return 0;
}

mp_sint32
PlayerGeneric::getOrder() const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		return player->getOrder(index);
	}
	return 0;
}

void PlayerGeneric::getPosition(mp_sint32& order, mp_sint32& row) const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		player->getPosition(order, row, index);
		return;
	}
	order = row = 0;
}

mp_sint32 PlayerGeneric::getLastUnvisitedPosition() const
{
	if (player)
		return player->getLastUnvisitedPosition();
	return 0;
}

void PlayerGeneric::getPosition(mp_sint32& order, mp_sint32& row, mp_sint32& ticker) const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		player->getPosition(order, row, ticker, index);
		return;
	}
	order = row = ticker = 0;
}

mp_int64 PlayerGeneric::getSyncCount() const
{
	if (player)
		return player->getSyncCount();
	return 0;
}

mp_uint32 PlayerGeneric::getSyncSampleCounter() const
{
	if (player)
		return player->getSyncSampleCounter();
	return 0;
}

void PlayerGeneric::nextPattern()
{
	if (player)
		player->nextPattern();
}

void PlayerGeneric::lastPattern()
{
	if (player)
		player->lastPattern();
}

void PlayerGeneric::setPatternPos(mp_uint32 pos, mp_uint32 row/* = 0*/, bool resetChannels/* = true*/, bool resetFXMemory/* = true*/)
{
	if (player)
		player->setPatternPos(pos, row, resetChannels, resetFXMemory);
}

mp_sint32 PlayerGeneric::getTempo() const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		return player->getTempo(index);
	}
	return 0;
}

mp_sint32 PlayerGeneric::getSpeed() const
{
	if (player)
	{
		mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition());
		return player->getSpeed(index);
	}
	return 0;
}

void PlayerGeneric::resetOnStop(bool b)
{
	resetOnStopFlag = b;
	if (player)
		player->resetOnStop(b);
}

// NOTE(review): trailing fragment — definition continues on the next chunk line.
void PlayerGeneric::resetMainVolumeOnStartPlay(bool b)
{
	resetMainVolumeOnStartPlayFlag = b;
	if (player)
player->resetMainVolumeOnStartPlay(b); } struct PeakAutoAdjustFilter : public Mixable { mp_uint32 mixerShift; mp_uint32 masterVolume; mp_sint32 lastPeakValue; PeakAutoAdjustFilter() : mixerShift(0), masterVolume(256), lastPeakValue(0) { } virtual void mix(mp_sint32* buffer, mp_uint32 bufferSize) { const mp_sint32* buffer32 = buffer; for (mp_uint32 i = 0; i < bufferSize*MP_NUMCHANNELS; i++) { mp_sint32 b = *buffer32++; if (abs(b) > lastPeakValue) lastPeakValue = abs(b); } } void calculateMasterVolume() { if (lastPeakValue) { float v = 32768.0f*(1<<mixerShift) / (float)lastPeakValue; masterVolume = (mp_sint32)((float)masterVolume*v); if (masterVolume > 256) masterVolume = 256; } } }; // export to 16bit stereo WAV mp_sint32 PlayerGeneric::exportToWAV(const SYSCHAR* fileName, XModule* module, mp_sint32 startOrder/* = 0*/, mp_sint32 endOrder/* = -1*/, const mp_ubyte* mutingArray/* = NULL*/, mp_uint32 mutingNumChannels/* = 0*/, const mp_ubyte* customPanningTable/* = NULL*/, AudioDriverBase* preferredDriver/* = NULL*/, mp_sint32* timingLUT/* = NULL*/) { PlayerBase* player = NULL; AudioDriverBase* wavWriter = preferredDriver; bool isWAVWriterDriver = false; if (wavWriter == NULL) { wavWriter = new WAVWriter(fileName); isWAVWriterDriver = true; if (!static_cast<WAVWriter*>(wavWriter)->isOpen()) { delete wavWriter; return MP_DEVICE_ERROR; } } MasterMixer mixer(frequency, bufferSize, 1, wavWriter); mixer.setSampleShift(sampleShift); mixer.setDisableMixing(disableMixing); player = getPreferredPlayer(module); PeakAutoAdjustFilter filter; if (autoAdjustPeak) mixer.setFilterHook(&filter); if (player) { player->adjustFrequency(frequency); player->resetOnStop(resetOnStopFlag); player->setBufferSize(bufferSize); player->setResamplerType(resamplerType); player->setMasterVolume(masterVolume); player->setPlayMode(playMode); player->setDisableMixing(disableMixing); player->setAllowFilters(allowFilters); #ifndef MILKYTRACKER if (player->getType() == PlayerBase::PlayerType_IT) { 
static_cast<PlayerIT*>(player)->setNumMaxVirChannels(numMaxVirChannels); } #endif mixer.addDevice(player); } if (player) { if (mutingArray && mutingNumChannels > 0 && mutingNumChannels <= module->header.channum) { for (mp_uint32 i = 0; i < mutingNumChannels; i++) player->muteChannel(i, mutingArray[i] == 1); } player->startPlaying(module, false, startOrder, 0, -1, customPanningTable, false, -1); mixer.start(); } if (endOrder == -1 || endOrder < startOrder || endOrder > module->header.ordnum - 1) endOrder = module->header.ordnum - 1; mp_sint32 curOrderPos = startOrder; if (timingLUT) { for (mp_sint32 i = 0; i < module->header.ordnum; i++) timingLUT[i] = -1; timingLUT[curOrderPos] = 0; } while (!player->hasSongHalted() && player->getOrder(0) <= endOrder) { wavWriter->advance(); if (player->getOrder(0) != curOrderPos) { #ifdef __VERBOSE__ printf("%f\n", (float)wavWriter->getNumPlayedSamples() / (float)getMixFrequency()); #endif curOrderPos = player->getOrder(0); if (timingLUT && curOrderPos < module->header.ordnum && timingLUT[curOrderPos] == -1) timingLUT[curOrderPos] = wavWriter->getNumPlayedSamples(); } } player->stopPlaying(); mixer.stop(); // important step, otherwise destruction of the audio driver will cause // trouble if the mixer instance is removed from this function's stack // and trys to access the driver which is no longer existant mixer.closeAudioDevice(); // Sync value sampleShift = mixer.getSampleShift(); filter.mixerShift = sampleShift; filter.calculateMasterVolume(); masterVolume = filter.masterVolume; delete player; mp_sint32 numWrittenSamples = wavWriter->getNumPlayedSamples(); if (isWAVWriterDriver) delete wavWriter; return numWrittenSamples; } bool PlayerGeneric::grabChannelInfo(mp_sint32 chn, TPlayerChannelInfo& channelInfo) const { if (player) return player->grabChannelInfo(chn, channelInfo); return false; } void PlayerGeneric::setNumMaxVirChannels(mp_sint32 max) { numMaxVirChannels = max; #ifndef MILKYTRACKER if (player) { if (player->getType() 
// NOTE(review): leading fragment — tail of setNumMaxVirChannels(), whose head
// sits on the previous chunk line; code verbatim.
			== PlayerBase::PlayerType_IT)
		{
			static_cast<PlayerIT*>(player)->setNumMaxVirChannels(max);
		}
	}
#endif
}

// Report the virtual-channel cap; queries the IT player directly when active
// (the IT engine is the only one that uses virtual channels).
mp_sint32 PlayerGeneric::getNumMaxVirChannels() const
{
#ifndef MILKYTRACKER
	if (player)
	{
		if (player->getType() == PlayerBase::PlayerType_IT)
		{
			return static_cast<PlayerIT*>(player)->getNumMaxVirChannels();
		}
	}
#endif
	return numMaxVirChannels;
}

// milkytracker
void PlayerGeneric::setPanning(mp_ubyte chn, mp_ubyte pan)
{
	if (player)
		player->setPanning(chn, pan);
}
./CrossVul/dataset_final_sorted/CWE-416/cpp/bad_4222_0
crossvul-cpp_data_good_2969_0
// This may look like C code, but it is really -*- C++ -*-
//
// Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2003
// Copyright Dirk Lemstra 2013-2015
//
// Implementation of Image
//

#define MAGICKCORE_IMPLEMENTATION 1
#define MAGICK_PLUSPLUS_IMPLEMENTATION 1

#include "Magick++/Include.h"
#include <cstdlib>
#include <string>
#include <string.h>
#include <errno.h>
#include <math.h>

using namespace std;

#include "Magick++/Image.h"
#include "Magick++/Functions.h"
#include "Magick++/Pixels.h"
#include "Magick++/Options.h"
#include "Magick++/ImageRef.h"
#include "Magick++/ResourceLimits.h"

// Helper macros used throughout this translation unit.
#define AbsoluteValue(x) ((x) < 0 ? -(x) : (x))
#define MagickPI 3.14159265358979323846264338327950288419716939937510
#define DegreesToRadians(x) (MagickPI*(x)/180.0)
#define ThrowImageException ThrowPPException(quiet())

// Default geometry strings used by border(), frame() and raise().
MagickPPExport const char *Magick::borderGeometryDefault="6x6+0+0";
MagickPPExport const char *Magick::frameGeometryDefault="25x25+6+6";
MagickPPExport const char *Magick::raiseGeometryDefault="6x6+0+0";

// Image comparison: equality is geometry plus pixel signature; ordering is
// based on total pixel count.
MagickPPExport int Magick::operator == (const Magick::Image &left_,
  const Magick::Image &right_)
{
  // If image pixels and signature are the same, then the image is identical
  return((left_.rows() == right_.rows()) &&
    (left_.columns() == right_.columns()) &&
    (left_.signature() == right_.signature()));
}

MagickPPExport int Magick::operator != (const Magick::Image &left_,
  const Magick::Image &right_)
{
  return(!(left_ == right_));
}

MagickPPExport int Magick::operator > (const Magick::Image &left_,
  const Magick::Image &right_)
{
  return(!(left_ < right_) && (left_ != right_));
}

MagickPPExport int Magick::operator < (const Magick::Image &left_,
  const Magick::Image &right_)
{
  // If image pixels are less, then image is smaller
  return((left_.rows() * left_.columns()) <
    (right_.rows() * right_.columns()));
}

MagickPPExport int Magick::operator >= (const Magick::Image &left_,
  const Magick::Image &right_)
{
  return((left_ > right_) || (left_ == right_));
}

MagickPPExport int Magick::operator <= (const Magick::Image &left_,
  const Magick::Image &right_)
{
  return((left_ < right_) || (left_ == right_));
}

// Default constructor: an empty, invalid image.
Magick::Image::Image(void)
  : _imgRef(new ImageRef)
{
}

// Construct from encoded image data held in a Blob. On read failure the
// freshly allocated ImageRef is released before the exception propagates;
// the same pattern applies to all reading constructors below.
Magick::Image::Image(const Blob &blob_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Initialize, Allocate and Read images
    quiet(true);
    read(blob_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct from a Blob with a size hint.
Magick::Image::Image(const Blob &blob_,const Geometry &size_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Read from Blob
    quiet(true);
    read(blob_,size_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct from a Blob with size and depth hints.
Magick::Image::Image(const Blob &blob_,const Geometry &size_,
  const size_t depth_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Read from Blob
    quiet(true);
    read(blob_,size_,depth_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct from a Blob with size, depth and format hints.
Magick::Image::Image(const Blob &blob_,const Geometry &size_,
  const size_t depth_,const std::string &magick_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Read from Blob
    quiet(true);
    read(blob_,size_,depth_,magick_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct from a Blob with size and format hints.
Magick::Image::Image(const Blob &blob_,const Geometry &size_,
  const std::string &magick_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Read from Blob
    quiet(true);
    read(blob_,size_,magick_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct a uniformly colored canvas of the given size.
Magick::Image::Image(const Geometry &size_,const Color &color_)
  : _imgRef(new ImageRef)
{
  // xc: prefix specifies an X11 color string
  std::string imageSpec("xc:");
  imageSpec+=color_;

  try
  {
    quiet(true);
    // Set image size
    size(size_);

    // Initialize, Allocate and Read images
    read(imageSpec);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Copy constructor: both images share one reference-counted ImageRef.
Magick::Image::Image(const Image &image_)
  : _imgRef(image_._imgRef)
{
  Lock lock(&_imgRef->_mutexLock);

  // Increase reference count
  ++_imgRef->_refCount;
}
// Construct a cropped copy: clone at the requested size, then copy the pixels
// of the source region described by geometry_.
Magick::Image::Image(const Image &image_,const Geometry &geometry_)
  : _imgRef(new ImageRef)
{
  const RectangleInfo
    geometry=geometry_;

  OffsetInfo
    offset;

  MagickCore::Image
    *image;

  GetPPException;
  image=CloneImage(image_.constImage(),geometry_.width(),geometry_.height(),
    MagickTrue,exceptionInfo);
  replaceImage(image);
  _imgRef->options(new Options(*image_.constOptions()));
  offset.x=0;
  offset.y=0;
  (void) CopyImagePixels(image,image_.constImage(),&geometry,&offset,
    exceptionInfo);
  ThrowImageException;
}

// Construct from raw pixel data laid out according to map_ (e.g. "RGB").
Magick::Image::Image(const size_t width_,const size_t height_,
  const std::string &map_,const StorageType type_,const void *pixels_)
  : _imgRef(new ImageRef)
{
  try
  {
    quiet(true);
    read(width_,height_,map_,type_,pixels_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Construct by reading an image specification (filename, "xc:...", etc.).
Magick::Image::Image(const std::string &imageSpec_)
  : _imgRef(new ImageRef)
{
  try
  {
    // Initialize, Allocate and Read images
    quiet(true);
    read(imageSpec_);
    quiet(false);
  }
  catch(const Error&)
  {
    // Release resources
    delete _imgRef;
    throw;
  }
}

// Destructor: drop our reference, freeing the ImageRef when it was the last.
// The count is decremented under the lock; the delete happens outside it.
Magick::Image::~Image()
{
  bool
    doDelete=false;

  {
    Lock lock(&_imgRef->_mutexLock);
    if (--_imgRef->_refCount == 0)
      doDelete=true;
  }

  if (doDelete)
    delete _imgRef;

  _imgRef=0;
}

// Assignment: adopt the right-hand side's ImageRef (incrementing its count
// first) and release our previous reference.
Magick::Image& Magick::Image::operator=(const Magick::Image &image_)
{
  if (this != &image_)
  {
    bool
      doDelete=false;

    {
      Lock lock(&image_._imgRef->_mutexLock);
      ++image_._imgRef->_refCount;
    }

    {
      Lock lock(&_imgRef->_mutexLock);
      if (--_imgRef->_refCount == 0)
        doDelete=true;
    }

    if (doDelete)
    {
      // Delete old image reference with associated image and options.
      delete _imgRef;
      _imgRef=0;
    }
    // Use new image reference
    _imgRef=image_._imgRef;
  }
  return(*this);
}

// Join images into a single multi-image file on write (e.g. animated GIF).
void Magick::Image::adjoin(const bool flag_)
{
  modifyImage();
  options()->adjoin(flag_);
}

bool Magick::Image::adjoin(void) const
{
  return(constOptions()->adjoin());
}

// Control antialiasing of rendered text and graphics.
void Magick::Image::antiAlias(const bool flag_)
{
  modifyImage();
  options()->antiAlias(flag_);
}

bool Magick::Image::antiAlias(void) const
{
  return(constOptions()->antiAlias());
}

// Inter-frame delay (in ticks) for animation sequences.
void Magick::Image::animationDelay(const size_t delay_)
{
  modifyImage();
  image()->delay=delay_;
}

size_t Magick::Image::animationDelay(void) const
{
  return(constImage()->delay);
}

// Number of animation loops (0 = loop forever).
void Magick::Image::animationIterations(const size_t iterations_)
{
  modifyImage();
  image()->iterations=iterations_;
}

size_t Magick::Image::animationIterations(void) const
{
  return(constImage()->iterations);
}

// Drawing attenuation factor, stored as an image artifact.
void Magick::Image::attenuate(const double attenuate_)
{
  char
    value[MaxTextExtent];

  modifyImage();
  FormatLocaleString(value,MaxTextExtent,"%.20g",attenuate_);
  (void) SetImageArtifact(image(),"attenuate",value);
}

// Image background color; an invalid color resets to the default.
void Magick::Image::backgroundColor(const Color &backgroundColor_)
{
  modifyImage();

  if (backgroundColor_.isValid())
    image()->background_color=backgroundColor_;
  else
    image()->background_color=Color();

  options()->backgroundColor(backgroundColor_);
}

Magick::Color Magick::Image::backgroundColor(void) const
{
  return(constOptions()->backgroundColor());
}

void Magick::Image::backgroundTexture(const std::string &backgroundTexture_)
{
  modifyImage();
  options()->backgroundTexture(backgroundTexture_);
}

std::string Magick::Image::backgroundTexture(void) const
{
  return(constOptions()->backgroundTexture());
}

// Original (as-read) image geometry and filename, recorded at read time.
size_t Magick::Image::baseColumns(void) const
{
  return(constImage()->magick_columns);
}

std::string Magick::Image::baseFilename(void) const
{
  return(std::string(constImage()->magick_filename));
}

size_t Magick::Image::baseRows(void) const
{
  return(constImage()->magick_rows);
}

// NOTE(review): unlike the other setters this one does not call modifyImage();
// presumably intentional upstream — verify before "fixing".
void Magick::Image::blackPointCompensation(const bool flag_)
{
  image()->black_point_compensation=(MagickBooleanType) flag_;
}

bool Magick::Image::blackPointCompensation(void) const
{
  return(static_cast<bool>(constImage()->black_point_compensation));
}

// Image border color; an invalid color resets to the default.
void Magick::Image::borderColor(const Color &borderColor_)
{
  modifyImage();

  if (borderColor_.isValid())
    image()->border_color=borderColor_;
  else
    image()->border_color=Color();

  options()->borderColor(borderColor_);
}

Magick::Color Magick::Image::borderColor(void) const
{
  return(constOptions()->borderColor());
}

// Smallest rectangle enclosing the non-border pixels.
Magick::Geometry Magick::Image::boundingBox(void) const
{
  RectangleInfo
    bbox;

  GetPPException;
  bbox=GetImageBoundingBox(constImage(),exceptionInfo);
  ThrowImageException;
  return(Geometry(bbox));
}

// Base color for annotation text boxes.
void Magick::Image::boxColor(const Color &boxColor_)
{
  modifyImage();
  options()->boxColor(boxColor_);
}

Magick::Color Magick::Image::boxColor(void) const
{
  return(constOptions()->boxColor());
}

// Set the pixel-cache memory threshold (a process-wide resource limit).
void Magick::Image::cacheThreshold(const size_t threshold_)
{
  ResourceLimits::memory((MagickSizeType) threshold_);
}

// Convert between Direct and Pseudo storage classes.
// NOTE(review): trailing fragment — body continues on the next chunk line.
void Magick::Image::classType(const ClassType class_)
{
  if (classType() == PseudoClass && class_ == DirectClass)
    {
      // Use SyncImage to synchronize the DirectClass pixels with the
      // color map and then set to DirectClass type.
// NOTE(review): leading fragment — continues Image::classType(const ClassType),
// whose head sits on the previous chunk line; code verbatim.
      modifyImage();
      SyncImage(image());
      image()->colormap=(PixelPacket *)RelinquishMagickMemory(
        image()->colormap);
      image()->storage_class=static_cast<MagickCore::ClassType>(DirectClass);
    }
  else if (classType() == DirectClass && class_ == PseudoClass)
    {
      // Quantize to create PseudoClass color map
      modifyImage();
      quantizeColors(MaxColormapSize);
      quantize();
      image()->storage_class=static_cast<MagickCore::ClassType>(PseudoClass);
    }
}

// Attach (or clear, when invalid) a clip mask restricting paint operations.
void Magick::Image::clipMask(const Magick::Image &clipMask_)
{
  modifyImage();

  if (clipMask_.isValid())
    SetImageClipMask(image(),clipMask_.constImage());
  else
    SetImageClipMask(image(),0);
}

Magick::Image Magick::Image::clipMask(void) const
{
  MagickCore::Image
    *image;

  GetPPException;
  image=GetImageClipMask(constImage(),exceptionInfo);
  ThrowImageException;
  if (image == (MagickCore::Image *) NULL)
    return(Magick::Image());
  else
    return(Magick::Image(image));
}

// Color-comparison fuzz factor (used by e.g. floodfill and transparent()).
void Magick::Image::colorFuzz(const double fuzz_)
{
  modifyImage();
  image()->fuzz=fuzz_;
  options()->colorFuzz(fuzz_);
}

double Magick::Image::colorFuzz(void) const
{
  return(constOptions()->colorFuzz());
}

// (Re)allocate the image colormap; rejects sizes above MaxColormapSize.
void Magick::Image::colorMapSize(const size_t entries_)
{
  if (entries_ > MaxColormapSize)
    throwExceptionExplicit(OptionError,
      "Colormap entries must not exceed MaxColormapSize");

  modifyImage();
  (void) AcquireImageColormap(image(),entries_);
}

size_t Magick::Image::colorMapSize(void) const
{
  if (!constImage()->colormap)
    throwExceptionExplicit(OptionError,"Image does not contain a colormap");

  return(constImage()->colors);
}

// Convert pixels to the given colorspace (no-op when already there).
void Magick::Image::colorSpace(const ColorspaceType colorSpace_)
{
  if (image()->colorspace == colorSpace_)
    return;

  modifyImage();
  TransformImageColorspace(image(),colorSpace_);
  throwImageException();
}

Magick::ColorspaceType Magick::Image::colorSpace(void) const
{
  return(constImage()->colorspace);
}

// Tag the image with a colorspace without transforming pixel values.
void Magick::Image::colorspaceType(const ColorspaceType colorSpace_)
{
  modifyImage();
  SetImageColorspace(image(),colorSpace_);
  throwImageException();
  options()->colorspaceType(colorSpace_);
}

Magick::ColorspaceType Magick::Image::colorspaceType(void) const
{
  return(constOptions()->colorspaceType());
}

// Set/clear the image "Comment" property.
void Magick::Image::comment(const std::string &comment_)
{
  modifyImage();
  SetImageProperty(image(),"Comment",NULL);
  if (comment_.length() > 0)
    SetImageProperty(image(),"Comment",comment_.c_str());
  throwImageException();
}

std::string Magick::Image::comment(void) const
{
  const char
    *value;

  value=GetImageProperty(constImage(),"Comment");

  if (value)
    return(std::string(value));

  return(std::string()); // Intentionally no exception
}

// Composition operator used when compositing onto this image.
void Magick::Image::compose(const CompositeOperator compose_)
{
  image()->compose=compose_;
}

Magick::CompositeOperator Magick::Image::compose(void) const
{
  return(constImage()->compose);
}

// Compression algorithm used when the image is written.
void Magick::Image::compressType(const CompressionType compressType_)
{
  modifyImage();
  image()->compression=compressType_;
  options()->compressType(compressType_);
}

Magick::CompressionType Magick::Image::compressType(void) const
{
  return(constImage()->compression);
}

void Magick::Image::debug(const bool flag_)
{
  modifyImage();
  options()->debug(flag_);
}

bool Magick::Image::debug(void) const
{
  return(constOptions()->debug());
}

// Image resolution; a height of 0 means "same as width".
void Magick::Image::density(const Geometry &density_)
{
  modifyImage();
  options()->density(density_);

  if (density_.isValid())
    {
      image()->x_resolution=density_.width();
      if (density_.height() != 0)
        image()->y_resolution=density_.height();
      else
        image()->y_resolution=density_.width();
    }
  else
    {
      // Reset to default
      image()->x_resolution=0;
      image()->y_resolution=0;
    }
}

// Report the image resolution, defaulting each axis to 72 when unset.
Magick::Geometry Magick::Image::density(void) const
{
  if (isValid())
    {
      ssize_t
        x_resolution=72,
        y_resolution=72;

      if (constImage()->x_resolution > 0.0)
        x_resolution=static_cast<ssize_t>(constImage()->x_resolution + 0.5);

      if (constImage()->y_resolution > 0.0)
        y_resolution=static_cast<ssize_t>(constImage()->y_resolution + 0.5);

      return(Geometry(x_resolution,y_resolution));
    }

  return(constOptions()->density());
}

// NOTE(review): trailing fragment — return type of the next definition, which
// continues on the next chunk line.
void
Magick::Image::depth(const size_t depth_) { size_t depth=depth_; if (depth > MAGICKCORE_QUANTUM_DEPTH) depth=MAGICKCORE_QUANTUM_DEPTH; modifyImage(); image()->depth=depth; options()->depth(depth); } size_t Magick::Image::depth(void) const { return(constImage()->depth); } std::string Magick::Image::directory(void) const { if (constImage()->directory) return(std::string(constImage()->directory)); throwExceptionExplicit(CorruptImageWarning, "Image does not contain a directory"); return(std::string()); } void Magick::Image::endian(const Magick::EndianType endian_) { modifyImage(); options()->endian(endian_); image()->endian=endian_; } Magick::EndianType Magick::Image::endian(void) const { return(constImage()->endian); } void Magick::Image::exifProfile(const Magick::Blob &exifProfile_) { if (exifProfile_.data() != 0) { StringInfo *exif_profile; modifyImage(); exif_profile=AcquireStringInfo(exifProfile_.length()); SetStringInfoDatum(exif_profile,(unsigned char *) exifProfile_.data()); (void) SetImageProfile(image(),"exif",exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } Magick::Blob Magick::Image::exifProfile(void) const { const StringInfo *exif_profile; exif_profile=GetImageProfile(constImage(),"exif"); if (exif_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(exif_profile),GetStringInfoLength( exif_profile))); } void Magick::Image::fileName(const std::string &fileName_) { modifyImage(); fileName_.copy(image()->filename,sizeof(image()->filename)-1); image()->filename[fileName_.length()]=0; // Null terminate options()->fileName(fileName_); } std::string Magick::Image::fileName(void) const { return(constOptions()->fileName()); } off_t Magick::Image::fileSize(void) const { return((off_t) GetBlobSize(constImage())); } void Magick::Image::fillColor(const Magick::Color &fillColor_) { modifyImage(); options()->fillColor(fillColor_); } Magick::Color Magick::Image::fillColor(void) const { return(constOptions()->fillColor()); } void 
// NOTE(review): leading fragment — the "void" return type of fillRule() sits
// at the end of the previous chunk line.
Magick::Image::fillRule(const Magick::FillRule &fillRule_)
{
  modifyImage();
  options()->fillRule(fillRule_);
}

Magick::FillRule Magick::Image::fillRule(void) const
{
  return(constOptions()->fillRule());
}

// Pattern image used for filling drawn objects (cleared when invalid).
void Magick::Image::fillPattern(const Image &fillPattern_)
{
  modifyImage();
  if(fillPattern_.isValid())
    options()->fillPattern(fillPattern_.constImage());
  else
    options()->fillPattern(static_cast<MagickCore::Image*>(NULL));
}

// Return a deep copy of the fill pattern (empty image when none is set).
Magick::Image Magick::Image::fillPattern(void) const
{
  // FIXME: This is inordinately innefficient
  const MagickCore::Image
    *tmpTexture;

  Image
    texture;

  tmpTexture=constOptions()->fillPattern();

  if(tmpTexture)
    {
      MagickCore::Image
        *image;

      GetPPException;
      image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo);
      texture.replaceImage(image);
      ThrowImageException;
    }
  return(texture);
}

// Resize/resample filter.
void Magick::Image::filterType(const Magick::FilterTypes filterType_)
{
  modifyImage();
  image()->filter=filterType_;
}

Magick::FilterTypes Magick::Image::filterType(void) const
{
  return(constImage()->filter);
}

// Font settings used by annotate()/draw().
void Magick::Image::font(const std::string &font_)
{
  modifyImage();
  options()->font(font_);
}

std::string Magick::Image::font(void) const
{
  return(constOptions()->font());
}

void Magick::Image::fontFamily(const std::string &family_)
{
  modifyImage();
  options()->fontFamily(family_);
}

std::string Magick::Image::fontFamily(void) const
{
  return(constOptions()->fontFamily());
}

void Magick::Image::fontPointsize(const double pointSize_)
{
  modifyImage();
  options()->fontPointsize(pointSize_);
}

double Magick::Image::fontPointsize(void) const
{
  return(constOptions()->fontPointsize());
}

// Long-form description of the image format; warns when unrecognized.
std::string Magick::Image::format(void) const
{
  const MagickInfo
    *magick_info;

  GetPPException;
  magick_info=GetMagickInfo(constImage()->magick,exceptionInfo);
  ThrowImageException;

  if ((magick_info != 0) && (*magick_info->description != '\0'))
    return(std::string(magick_info->description));

  throwExceptionExplicit(CorruptImageWarning,"Unrecognized image magick type");
  return(std::string());
}
void Magick::Image::fontStyle(const StyleType pointSize_) { modifyImage(); options()->fontStyle(pointSize_); } Magick::StyleType Magick::Image::fontStyle(void) const { return(constOptions()->fontStyle()); } void Magick::Image::fontWeight(const size_t weight_) { modifyImage(); options()->fontWeight(weight_); } size_t Magick::Image::fontWeight(void) const { return(constOptions()->fontWeight()); } std::string Magick::Image::formatExpression(const std::string expression) { char *text; std::string text_string; modifyImage(); text=InterpretImageProperties(constImageInfo(),image(),expression.c_str()); if (text != (char *) NULL) { text_string=std::string(text); text=DestroyString(text); } throwImageException(); return(text_string); } double Magick::Image::gamma(void) const { return(constImage()->gamma); } Magick::Geometry Magick::Image::geometry(void) const { if (constImage()->geometry) return(Geometry(constImage()->geometry)); throwExceptionExplicit(OptionWarning,"Image does not contain a geometry"); return(Geometry()); } void Magick::Image::gifDisposeMethod(const size_t disposeMethod_) { modifyImage(); image()->dispose=(DisposeType) disposeMethod_; } size_t Magick::Image::gifDisposeMethod(void) const { // FIXME: It would be better to return an enumeration return ((size_t) constImage()->dispose); } void Magick::Image::highlightColor(const Color color_) { std::string value; value=color_; artifact("highlight-color",value); } void Magick::Image::iccColorProfile(const Magick::Blob &colorProfile_) { profile("icc",colorProfile_); } Magick::Blob Magick::Image::iccColorProfile(void) const { const StringInfo *color_profile; color_profile=GetImageProfile(constImage(),"icc"); if (color_profile == (StringInfo *) NULL) return Blob(); return(Blob(GetStringInfoDatum(color_profile),GetStringInfoLength( color_profile))); } void Magick::Image::interlaceType(const InterlaceType interlace_) { modifyImage(); image()->interlace=interlace_; options()->interlaceType(interlace_); } 
Magick::InterlaceType Magick::Image::interlaceType(void) const { return constImage()->interlace; } void Magick::Image::interpolate(const InterpolatePixelMethod interpolate_) { modifyImage(); image()->interpolate=interpolate_; } Magick::InterpolatePixelMethod Magick::Image::interpolate(void) const { return constImage()->interpolate; } void Magick::Image::iptcProfile(const Magick::Blob &iptcProfile_) { modifyImage(); if (iptcProfile_.data() != 0) { StringInfo *iptc_profile; iptc_profile=AcquireStringInfo(iptcProfile_.length()); SetStringInfoDatum(iptc_profile,(unsigned char *) iptcProfile_.data()); (void) SetImageProfile(image(),"iptc",iptc_profile); iptc_profile=DestroyStringInfo(iptc_profile ); } } Magick::Blob Magick::Image::iptcProfile(void) const { const StringInfo *iptc_profile; iptc_profile=GetImageProfile(constImage(),"iptc"); if (iptc_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(iptc_profile),GetStringInfoLength( iptc_profile))); } bool Magick::Image::isOpaque(void) const { MagickBooleanType result; GetPPException; result=IsOpaqueImage(constImage(),exceptionInfo); ThrowImageException; return(result != MagickFalse ? true : false); } void Magick::Image::isValid(const bool isValid_) { if (!isValid_) { delete _imgRef; _imgRef = new ImageRef; } else if (!isValid()) { // Construct with single-pixel black image to make // image valid. This is an obvious hack. 
size(Geometry(1,1)); read("xc:black"); } } bool Magick::Image::isValid(void) const { return(rows() && columns()); } void Magick::Image::label(const std::string &label_) { modifyImage(); (void) SetImageProperty(image(),"Label",NULL); if (label_.length() > 0) (void) SetImageProperty(image(),"Label",label_.c_str()); throwImageException(); } std::string Magick::Image::label(void) const { const char *value; value=GetImageProperty(constImage(),"Label"); if (value) return(std::string(value)); return(std::string()); } void Magick::Image::lowlightColor(const Color color_) { std::string value; value=color_; artifact("lowlight-color",value); } void Magick::Image::magick(const std::string &magick_) { size_t length; modifyImage(); length=sizeof(image()->magick)-1; if (magick_.length() < length) length=magick_.length(); if (!magick_.empty()) magick_.copy(image()->magick,length); image()->magick[length]=0; options()->magick(magick_); } std::string Magick::Image::magick(void) const { if (*(constImage()->magick) != '\0') return(std::string(constImage()->magick)); return(constOptions()->magick()); } void Magick::Image::mask(const Magick::Image &mask_) { modifyImage(); if (mask_.isValid()) SetImageMask(image(),mask_.constImage()); else SetImageMask(image(),0); } Magick::Image Magick::Image::mask(void) const { MagickCore::Image *image; GetPPException; image=GetImageMask(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::matte(const bool matteFlag_) { modifyImage(); // If matte channel is requested, but image doesn't already have a // matte channel, then create an opaque matte channel. Likewise, if // the image already has a matte channel but a matte channel is not // desired, then set the matte channel to opaque. 
if ((matteFlag_ && !constImage()->matte) || (constImage()->matte && !matteFlag_)) SetImageOpacity(image(),OpaqueOpacity); image()->matte=(MagickBooleanType) matteFlag_; } bool Magick::Image::matte(void) const { if (constImage()->matte) return true; else return false; } void Magick::Image::matteColor(const Color &matteColor_) { modifyImage(); if (matteColor_.isValid()) { image()->matte_color=matteColor_; options()->matteColor(matteColor_); } else { // Set to default matte color Color tmpColor("#BDBDBD"); image()->matte_color=tmpColor; options()->matteColor(tmpColor); } } Magick::Color Magick::Image::matteColor(void) const { return(Color(constImage()->matte_color.red,constImage()->matte_color.green, constImage()->matte_color.blue)); } double Magick::Image::meanErrorPerPixel(void) const { return(constImage()->error.mean_error_per_pixel); } void Magick::Image::modulusDepth(const size_t depth_) { modifyImage(); SetImageDepth(image(),depth_); options()->depth(depth_); } size_t Magick::Image::modulusDepth(void) const { size_t depth; GetPPException; depth=GetImageDepth(constImage(),exceptionInfo); ThrowImageException; return(depth); } void Magick::Image::monochrome(const bool monochromeFlag_) { modifyImage(); options()->monochrome(monochromeFlag_); } bool Magick::Image::monochrome(void) const { return(constOptions()->monochrome()); } Magick::Geometry Magick::Image::montageGeometry(void) const { if (constImage()->montage) return(Magick::Geometry(constImage()->montage)); throwExceptionExplicit(CorruptImageWarning, "Image does not contain a montage"); return(Magick::Geometry()); } double Magick::Image::normalizedMaxError(void) const { return(constImage()->error.normalized_maximum_error); } double Magick::Image::normalizedMeanError(void) const { return (constImage()->error.normalized_mean_error); } void Magick::Image::orientation(const Magick::OrientationType orientation_) { modifyImage(); image()->orientation=orientation_; } Magick::OrientationType 
Magick::Image::orientation(void) const { return(constImage()->orientation); } void Magick::Image::page(const Magick::Geometry &pageSize_) { modifyImage(); options()->page(pageSize_); image()->page=pageSize_; } Magick::Geometry Magick::Image::page(void) const { return(Geometry(constImage()->page.width,constImage()->page.height, AbsoluteValue(constImage()->page.x),AbsoluteValue(constImage()->page.y), constImage()->page.x < 0 ? true : false, constImage()->page.y < 0 ? true : false)); } void Magick::Image::penColor(const Color &penColor_) { modifyImage(); options()->fillColor(penColor_); options()->strokeColor(penColor_); } Magick::Color Magick::Image::penColor(void) const { return(constOptions()->fillColor()); } void Magick::Image::penTexture(const Image &penTexture_) { modifyImage(); if(penTexture_.isValid()) options()->fillPattern(penTexture_.constImage()); else options()->fillPattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::penTexture(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->fillPattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::quality(const size_t quality_) { modifyImage(); image()->quality=quality_; options()->quality(quality_); } size_t Magick::Image::quality(void) const { return(constImage()->quality); } void Magick::Image::quantizeColors(const size_t colors_) { modifyImage(); options()->quantizeColors(colors_); } size_t Magick::Image::quantizeColors(void) const { return(constOptions()->quantizeColors()); } void Magick::Image::quantizeColorSpace( const Magick::ColorspaceType colorSpace_) { modifyImage(); options()->quantizeColorSpace(colorSpace_); } Magick::ColorspaceType Magick::Image::quantizeColorSpace(void) const { return(constOptions()->quantizeColorSpace()); } 
void Magick::Image::quantizeDither(const bool ditherFlag_) { modifyImage(); options()->quantizeDither(ditherFlag_); } bool Magick::Image::quantizeDither(void) const { return(constOptions()->quantizeDither()); } void Magick::Image::quantizeDitherMethod(const DitherMethod ditherMethod_) { modifyImage(); options()->quantizeDitherMethod(ditherMethod_); } MagickCore::DitherMethod Magick::Image::quantizeDitherMethod(void) const { return(constOptions()->quantizeDitherMethod()); } void Magick::Image::quantizeTreeDepth(const size_t treeDepth_) { modifyImage(); options()->quantizeTreeDepth(treeDepth_); } size_t Magick::Image::quantizeTreeDepth(void) const { return(constOptions()->quantizeTreeDepth()); } void Magick::Image::quiet(const bool quiet_) { modifyImage(); options()->quiet(quiet_); } bool Magick::Image::quiet(void) const { return(constOptions()->quiet()); } void Magick::Image::renderingIntent( const Magick::RenderingIntent renderingIntent_) { modifyImage(); image()->rendering_intent=renderingIntent_; } Magick::RenderingIntent Magick::Image::renderingIntent(void) const { return(static_cast<Magick::RenderingIntent>( constImage()->rendering_intent)); } void Magick::Image::resolutionUnits( const Magick::ResolutionType resolutionUnits_) { modifyImage(); image()->units=resolutionUnits_; options()->resolutionUnits(resolutionUnits_); } Magick::ResolutionType Magick::Image::resolutionUnits(void) const { return(static_cast<Magick::ResolutionType>(constImage()->units)); } void Magick::Image::scene(const size_t scene_) { modifyImage(); image()->scene=scene_; } size_t Magick::Image::scene(void) const { return(constImage()->scene); } void Magick::Image::size(const Geometry &geometry_) { modifyImage(); options()->size(geometry_); image()->rows=geometry_.height(); image()->columns=geometry_.width(); } Magick::Geometry Magick::Image::size(void) const { return(Magick::Geometry(constImage()->columns,constImage()->rows)); } void Magick::Image::strokeAntiAlias(const bool flag_) { 
modifyImage(); options()->strokeAntiAlias(flag_); } bool Magick::Image::strokeAntiAlias(void) const { return(constOptions()->strokeAntiAlias()); } void Magick::Image::strokeColor(const Magick::Color &strokeColor_) { std::string value; modifyImage(); options()->strokeColor(strokeColor_); value=strokeColor_; artifact("stroke",value); } Magick::Color Magick::Image::strokeColor(void) const { return(constOptions()->strokeColor()); } void Magick::Image::strokeDashArray(const double *strokeDashArray_) { modifyImage(); options()->strokeDashArray(strokeDashArray_); } const double *Magick::Image::strokeDashArray(void) const { return(constOptions()->strokeDashArray()); } void Magick::Image::strokeDashOffset(const double strokeDashOffset_) { modifyImage(); options()->strokeDashOffset(strokeDashOffset_); } double Magick::Image::strokeDashOffset(void) const { return(constOptions()->strokeDashOffset()); } void Magick::Image::strokeLineCap(const Magick::LineCap lineCap_) { modifyImage(); options()->strokeLineCap(lineCap_); } Magick::LineCap Magick::Image::strokeLineCap(void) const { return(constOptions()->strokeLineCap()); } void Magick::Image::strokeLineJoin(const Magick::LineJoin lineJoin_) { modifyImage(); options()->strokeLineJoin(lineJoin_); } Magick::LineJoin Magick::Image::strokeLineJoin(void) const { return(constOptions()->strokeLineJoin()); } void Magick::Image::strokeMiterLimit(const size_t strokeMiterLimit_) { modifyImage(); options()->strokeMiterLimit(strokeMiterLimit_); } size_t Magick::Image::strokeMiterLimit(void) const { return constOptions()->strokeMiterLimit(); } void Magick::Image::strokePattern(const Image &strokePattern_) { modifyImage(); if(strokePattern_.isValid()) options()->strokePattern(strokePattern_.constImage()); else options()->strokePattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::strokePattern(void) const { const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->strokePattern(); if (tmpTexture) { 
MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::strokeWidth(const double strokeWidth_) { char value[MaxTextExtent]; modifyImage(); options()->strokeWidth(strokeWidth_); FormatLocaleString(value,MaxTextExtent,"%.20g",strokeWidth_); (void) SetImageArtifact(image(),"strokewidth",value); } double Magick::Image::strokeWidth(void) const { return(constOptions()->strokeWidth()); } void Magick::Image::subImage(const size_t subImage_) { modifyImage(); options()->subImage(subImage_); } size_t Magick::Image::subImage(void) const { return(constOptions()->subImage()); } void Magick::Image::subRange(const size_t subRange_) { modifyImage(); options()->subRange(subRange_); } size_t Magick::Image::subRange(void) const { return(constOptions()->subRange()); } void Magick::Image::textDirection(DirectionType direction_) { modifyImage(); options()->textDirection(direction_); } Magick::DirectionType Magick::Image::textDirection(void) const { return(constOptions()->textDirection()); } void Magick::Image::textEncoding(const std::string &encoding_) { modifyImage(); options()->textEncoding(encoding_); } std::string Magick::Image::textEncoding(void) const { return(constOptions()->textEncoding()); } void Magick::Image::textGravity(GravityType gravity_) { modifyImage(); options()->textGravity(gravity_); } Magick::GravityType Magick::Image::textGravity(void) const { return(constOptions()->textGravity()); } void Magick::Image::textInterlineSpacing(double spacing_) { modifyImage(); options()->textInterlineSpacing(spacing_); } double Magick::Image::textInterlineSpacing(void) const { return(constOptions()->textInterlineSpacing()); } void Magick::Image::textInterwordSpacing(double spacing_) { modifyImage(); options()->textInterwordSpacing(spacing_); } double Magick::Image::textInterwordSpacing(void) const { return(constOptions()->textInterwordSpacing()); } 
void Magick::Image::textKerning(double kerning_) { modifyImage(); options()->textKerning(kerning_); } double Magick::Image::textKerning(void) const { return(constOptions()->textKerning()); } void Magick::Image::textUnderColor(const Color &underColor_) { modifyImage(); options()->textUnderColor(underColor_); } Magick::Color Magick::Image::textUnderColor(void) const { return(constOptions()->textUnderColor()); } void Magick::Image::tileName(const std::string &tileName_) { modifyImage(); options()->tileName(tileName_); } std::string Magick::Image::tileName(void) const { return(constOptions()->tileName()); } size_t Magick::Image::totalColors(void) const { size_t colors; GetPPException; colors=GetNumberColors(constImage(),0,exceptionInfo); ThrowImageException; return(colors); } void Magick::Image::transformRotation(const double angle_) { modifyImage(); options()->transformRotation(angle_); } void Magick::Image::transformSkewX(const double skewx_) { modifyImage(); options()->transformSkewX(skewx_); } void Magick::Image::transformSkewY(const double skewy_) { modifyImage(); options()->transformSkewY(skewy_); } void Magick::Image::type(const Magick::ImageType type_) { modifyImage(); options()->type(type_); SetImageType(image(),type_); } Magick::ImageType Magick::Image::type(void) const { if (constOptions()->type() != UndefinedType) return(constOptions()->type()); else if (constImage()->type != UndefinedType) return(constImage()->type); else return(determineType()); } void Magick::Image::verbose(const bool verboseFlag_) { modifyImage(); options()->verbose(verboseFlag_); } bool Magick::Image::verbose(void) const { return(constOptions()->verbose()); } void Magick::Image::view(const std::string &view_) { modifyImage(); options()->view(view_); } std::string Magick::Image::view(void) const { return(constOptions()->view()); } void Magick::Image::virtualPixelMethod( const VirtualPixelMethod virtual_pixel_method_) { modifyImage(); 
SetImageVirtualPixelMethod(image(),virtual_pixel_method_); options()->virtualPixelMethod(virtual_pixel_method_); } Magick::VirtualPixelMethod Magick::Image::virtualPixelMethod(void) const { return(GetImageVirtualPixelMethod(constImage())); } void Magick::Image::x11Display(const std::string &display_) { modifyImage(); options()->x11Display(display_); } std::string Magick::Image::x11Display(void) const { return(constOptions()->x11Display()); } double Magick::Image::xResolution(void) const { return(constImage()->x_resolution); } double Magick::Image::yResolution(void) const { return(constImage()->y_resolution); } void Magick::Image::adaptiveBlur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveBlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveResize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t width=columns(), height=rows(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=AdaptiveResizeImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpenChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveSharpenImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveThreshold(const size_t width_,const size_t height_, const ssize_t offset_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveThresholdImage(constImage(),width_,height_,offset_, 
exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoise(const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; newImage=AddNoiseImage(constImage(),noiseType_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoiseChannel(const ChannelType channel_, const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; newImage=AddNoiseImageChannel(constImage(),channel_,noiseType_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::affineTransform(const DrawableAffine &affine_ ) { AffineMatrix _affine; MagickCore::Image *newImage; _affine.sx = affine_.sx(); _affine.sy = affine_.sy(); _affine.rx = affine_.rx(); _affine.ry = affine_.ry(); _affine.tx = affine_.tx(); _affine.ty = affine_.ty(); GetPPException; newImage=AffineTransformImage(constImage(),&_affine,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::alphaChannel(AlphaChannelType alphaType_) { modifyImage(); SetImageAlphaChannel(image(), alphaType_); throwImageException(); } void Magick::Image::annotate(const std::string &text_, const Geometry &location_) { annotate(text_,location_,NorthWestGravity,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_) { annotate(text_,boundingArea_,gravity_,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_, const double degrees_) { AffineMatrix oaffine; char boundingArea[MaxTextExtent]; DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->geometry=DestroyString(drawInfo->geometry); if (boundingArea_.isValid()) { if (boundingArea_.width() == 0 || boundingArea_.height() == 0) { FormatLocaleString(boundingArea,MaxTextExtent,"%+.20g%+.20g", (double) 
boundingArea_.xOff(),(double) boundingArea_.yOff()); } else { (void) CopyMagickString(boundingArea, std::string(boundingArea_).c_str(), MaxTextExtent); } drawInfo->geometry=boundingArea; } drawInfo->gravity=gravity_; oaffine=drawInfo->affine; if (degrees_ != 0.0) { AffineMatrix affine, current; affine.sx=1.0; affine.rx=0.0; affine.ry=0.0; affine.sy=1.0; affine.tx=0.0; affine.ty=0.0; current=drawInfo->affine; affine.sx=cos(DegreesToRadians(fmod(degrees_,360.0))); affine.rx=sin(DegreesToRadians(fmod(degrees_,360.0))); affine.ry=(-sin(DegreesToRadians(fmod(degrees_,360.0)))); affine.sy=cos(DegreesToRadians(fmod(degrees_,360.0))); drawInfo->affine.sx=current.sx*affine.sx+current.ry*affine.rx; drawInfo->affine.rx=current.rx*affine.sx+current.sy*affine.rx; drawInfo->affine.ry=current.sx*affine.ry+current.ry*affine.sy; drawInfo->affine.sy=current.rx*affine.ry+current.sy*affine.sy; drawInfo->affine.tx=current.sx*affine.tx+current.ry*affine.ty +current.tx; } AnnotateImage(image(),drawInfo); // Restore original values drawInfo->affine=oaffine; drawInfo->text=(char *) NULL; drawInfo->geometry=(char *) NULL; throwImageException(); } void Magick::Image::annotate(const std::string &text_, const GravityType gravity_) { DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->gravity=gravity_; AnnotateImage(image(),drawInfo); drawInfo->gravity=NorthWestGravity; drawInfo->text=(char *) NULL; throwImageException(); } void Magick::Image::artifact(const std::string &name_, const std::string &value_) { modifyImage(); (void) SetImageArtifact(image(),name_.c_str(),value_.c_str()); } std::string Magick::Image::artifact(const std::string &name_) const { const char *value; value=GetImageArtifact(constImage(),name_.c_str()); if (value) return(std::string(value)); return(std::string()); } void Magick::Image::attribute(const std::string name_,const char *value_) { modifyImage(); 
SetImageProperty(image(),name_.c_str(),value_); } void Magick::Image::attribute(const std::string name_,const std::string value_) { modifyImage(); SetImageProperty(image(),name_.c_str(),value_.c_str()); } std::string Magick::Image::attribute(const std::string name_) const { const char *value; value=GetImageProperty(constImage(),name_.c_str()); if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::autoGamma(void) { modifyImage(); (void) AutoGammaImage(image()); throwImageException(); } void Magick::Image::autoGammaChannel(const ChannelType channel_) { modifyImage(); (void) AutoGammaImageChannel(image(),channel_); throwImageException(); } void Magick::Image::autoLevel(void) { modifyImage(); (void) AutoLevelImage(image()); throwImageException(); } void Magick::Image::autoLevelChannel(const ChannelType channel_) { modifyImage(); (void) AutoLevelImageChannel(image(),channel_); throwImageException(); } void Magick::Image::autoOrient(void) { MagickCore::Image *newImage; if (image()->orientation == UndefinedOrientation || image()->orientation == TopLeftOrientation) return; GetPPException; newImage=AutoOrientImage(constImage(),image()->orientation,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blackThreshold(const std::string &threshold_) { modifyImage(); BlackThresholdImage(image(),threshold_.c_str()); throwImageException(); } void Magick::Image::blackThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; BlackThresholdImageChannel(image(),channel_,threshold_.c_str(), exceptionInfo); ThrowImageException; } void Magick::Image::blueShift(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=BlueShiftImage(constImage(),factor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } // Blur image void Magick::Image::blur(const double radius_, const double sigma_) { MagickCore::Image *newImage; 
GetPPException; newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blurChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=BlurImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::border(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo borderInfo=geometry_; GetPPException; newImage=BorderImage(constImage(),&borderInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::brightnessContrast(const double brightness_, const double contrast_) { modifyImage(); BrightnessContrastImage(image(),brightness_,contrast_); throwImageException(); } void Magick::Image::brightnessContrastChannel(const ChannelType channel_, const double brightness_,const double contrast_) { modifyImage(); BrightnessContrastImageChannel(image(),channel_,brightness_,contrast_); throwImageException(); } void Magick::Image::cannyEdge(const double radius_,const double sigma_, const double lowerPercent_,const double upperPercent_) { MagickCore::Image *newImage; modifyImage(); GetPPException; newImage=CannyEdgeImage(constImage(),radius_,sigma_,lowerPercent_, upperPercent_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::cdl(const std::string &cdl_) { modifyImage(); (void) ColorDecisionListImage(image(),cdl_.c_str()); throwImageException(); } void Magick::Image::channel(const ChannelType channel_) { modifyImage(); SeparateImageChannel(image(),channel_); throwImageException(); } void Magick::Image::channelDepth(const ChannelType channel_, const size_t depth_) { modifyImage(); SetImageChannelDepth(image(),channel_,depth_); throwImageException(); } size_t Magick::Image::channelDepth(const ChannelType channel_) { size_t channel_depth; GetPPException; 
channel_depth=GetImageChannelDepth(constImage(), channel_,exceptionInfo); ThrowImageException; return channel_depth; } void Magick::Image::charcoal(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=CharcoalImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::chop(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo chopInfo=geometry_; GetPPException; newImage=ChopImage(constImage(),&chopInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::chromaBluePrimary(const double x_,const double y_) { modifyImage(); image()->chromaticity.blue_primary.x=x_; image()->chromaticity.blue_primary.y=y_; } void Magick::Image::chromaBluePrimary(double *x_,double *y_) const { *x_=constImage()->chromaticity.blue_primary.x; *y_=constImage()->chromaticity.blue_primary.y; } void Magick::Image::chromaGreenPrimary(const double x_,const double y_) { modifyImage(); image()->chromaticity.green_primary.x=x_; image()->chromaticity.green_primary.y=y_; } void Magick::Image::chromaGreenPrimary(double *x_,double *y_) const { *x_=constImage()->chromaticity.green_primary.x; *y_=constImage()->chromaticity.green_primary.y; } void Magick::Image::chromaRedPrimary(const double x_,const double y_) { modifyImage(); image()->chromaticity.red_primary.x=x_; image()->chromaticity.red_primary.y=y_; } void Magick::Image::chromaRedPrimary(double *x_,double *y_) const { *x_=constImage()->chromaticity.red_primary.x; *y_=constImage()->chromaticity.red_primary.y; } void Magick::Image::chromaWhitePoint(const double x_,const double y_) { modifyImage(); image()->chromaticity.white_point.x=x_; image()->chromaticity.white_point.y=y_; } void Magick::Image::chromaWhitePoint(double *x_,double *y_) const { *x_=constImage()->chromaticity.white_point.x; *y_=constImage()->chromaticity.white_point.y; } void Magick::Image::clamp(void) { modifyImage(); 
ClampImage(image()); throwImageException(); } void Magick::Image::clampChannel(const ChannelType channel_) { modifyImage(); ClampImageChannel(image(),channel_); throwImageException(); } void Magick::Image::clip(void ) { modifyImage(); ClipImage(image()); throwImageException(); } void Magick::Image::clipPath(const std::string pathname_,const bool inside_) { modifyImage(); ClipImagePath(image(),pathname_.c_str(),(MagickBooleanType) inside_); throwImageException(); } void Magick::Image::clut(const Image &clutImage_) { modifyImage(); ClutImage(image(),clutImage_.constImage()); throwImageException(); } void Magick::Image::clutChannel(const ChannelType channel_, const Image &clutImage_) { modifyImage(); ClutImageChannel(image(),channel_,clutImage_.constImage()); throwImageException(); } void Magick::Image::colorize(const unsigned int opacityRed_, const unsigned int opacityGreen_,const unsigned int opacityBlue_, const Color &penColor_) { char opacity[MaxTextExtent]; MagickCore::Image *newImage; if (!penColor_.isValid()) throwExceptionExplicit( OptionError, "Pen color argument is invalid" ); FormatLocaleString(opacity,MaxTextExtent,"%u/%u/%u",opacityRed_, opacityGreen_,opacityBlue_); GetPPException; newImage=ColorizeImage(image(),opacity,penColor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::colorize(const unsigned int opacity_, const Color &penColor_) { colorize(opacity_,opacity_,opacity_,penColor_); } void Magick::Image::colorMap(const size_t index_,const Color &color_) { if (index_ > (MaxColormapSize-1) ) throwExceptionExplicit(OptionError, "Colormap index must be less than MaxColormapSize"); if (!color_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); modifyImage(); // Ensure that colormap size is large enough if (colorMapSize() < (index_+1)) colorMapSize(index_+1); // Set color at index in colormap (image()->colormap)[index_]=color_; } Magick::Color Magick::Image::colorMap(const size_t index_) const { if 
(!constImage()->colormap) { throwExceptionExplicit(OptionError,"Image does not contain a colormap"); return(Color()); } if (index_ > constImage()->colors-1) throwExceptionExplicit(OptionError,"Index out of range"); return(Color((constImage()->colormap)[index_])); } void Magick::Image::colorMatrix(const size_t order_, const double *color_matrix_) { KernelInfo *kernel_info; MagickCore::Image *newImage; GetPPException; kernel_info=AcquireKernelInfo("1"); if (kernel_info != (KernelInfo *) NULL) { kernel_info->width=order_; kernel_info->height=order_; kernel_info->values=(double *) color_matrix_; newImage=ColorMatrixImage(constImage(),kernel_info,exceptionInfo); kernel_info->values=(double *) NULL; kernel_info=DestroyKernelInfo(kernel_info); replaceImage(newImage); ThrowImageException; } } bool Magick::Image::compare(const Image &reference_) { bool status; Image ref=reference_; modifyImage(); ref.modifyImage(); status=static_cast<bool>(IsImagesEqual(image(),ref.constImage())); throwImageException(); return(status); } double Magick::Image::compare(const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageDistortion(image(),reference_.constImage(),metric_,&distortion, exceptionInfo); ThrowImageException; return(distortion); } double Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageChannelDistortion(image(),reference_.constImage(),channel_,metric_, &distortion,exceptionInfo); ThrowImageException; return(distortion); } Magick::Image Magick::Image::compare(const Image &reference_, const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; newImage=CompareImages(image(),reference_.constImage(),metric_,distortion, exceptionInfo); ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } Magick::Image 
Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; newImage=CompareImageChannels(image(),reference_.constImage(),channel_, metric_,distortion,exceptionInfo); ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::composite(const Image &compositeImage_, const Geometry &offset_,const CompositeOperator compose_) { size_t height=rows(), width=columns(); ssize_t x=offset_.xOff(), y=offset_.yOff(); modifyImage(); ParseMetaGeometry(static_cast<std::string>(offset_).c_str(),&x,&y,&width, &height); CompositeImage(image(),compose_,compositeImage_.constImage(),x,y); throwImageException(); } void Magick::Image::composite(const Image &compositeImage_, const GravityType gravity_,const CompositeOperator compose_) { RectangleInfo geometry; modifyImage(); SetGeometry(compositeImage_.constImage(),&geometry); GravityAdjustGeometry(columns(),rows(),gravity_,&geometry); CompositeImage(image(),compose_,compositeImage_.constImage(),geometry.x, geometry.y); throwImageException(); } void Magick::Image::composite(const Image &compositeImage_, const ssize_t xOffset_,const ssize_t yOffset_, const CompositeOperator compose_) { // Image supplied as compositeImage is composited with current image and // results in updating current image. 
modifyImage(); CompositeImage(image(),compose_,compositeImage_.constImage(),xOffset_, yOffset_); throwImageException(); } void Magick::Image::connectedComponents(const size_t connectivity_) { MagickCore::Image *newImage; GetPPException; newImage=ConnectedComponentsImage(constImage(),connectivity_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::contrast(const size_t sharpen_) { modifyImage(); ContrastImage(image(),(MagickBooleanType) sharpen_); throwImageException(); } void Magick::Image::contrastStretch(const double black_point_, const double white_point_) { modifyImage(); ContrastStretchImageChannel(image(),DefaultChannels,black_point_, white_point_); throwImageException(); } void Magick::Image::contrastStretchChannel(const ChannelType channel_, const double black_point_,const double white_point_) { modifyImage(); ContrastStretchImageChannel(image(),channel_,black_point_,white_point_); throwImageException(); } void Magick::Image::convolve(const size_t order_,const double *kernel_) { MagickCore::Image *newImage; GetPPException; newImage=ConvolveImage(constImage(),order_,kernel_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::copyPixels(const Image &source_,const Geometry &geometry_, const Offset &offset_) { const OffsetInfo offset=offset_; const RectangleInfo geometry=geometry_; GetPPException; (void) CopyImagePixels(image(),source_.constImage(),&geometry,&offset, exceptionInfo); ThrowImageException; } void Magick::Image::crop(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo cropInfo=geometry_; GetPPException; newImage=CropImage(constImage(),&cropInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::cycleColormap(const ssize_t amount_) { modifyImage(); CycleColormapImage(image(),amount_); throwImageException(); } void Magick::Image::decipher(const std::string &passphrase_) { modifyImage(); GetPPException; 
DecipherImage(image(),passphrase_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::defineSet(const std::string &magick_, const std::string &key_,bool flag_) { std::string definition; modifyImage(); definition=magick_ + ":" + key_; if (flag_) (void) SetImageOption(imageInfo(),definition.c_str(),""); else DeleteImageOption(imageInfo(),definition.c_str()); } bool Magick::Image::defineSet(const std::string &magick_, const std::string &key_) const { const char *option; std::string key; key=magick_ + ":" + key_; option=GetImageOption(constImageInfo(),key.c_str()); if (option) return(true); return(false); } void Magick::Image::defineValue(const std::string &magick_, const std::string &key_,const std::string &value_) { std::string format; modifyImage(); format=magick_ + ":" + key_; (void) SetImageOption(imageInfo(),format.c_str(),value_.c_str()); } std::string Magick::Image::defineValue(const std::string &magick_, const std::string &key_) const { const char *option; std::string definition; definition=magick_ + ":" + key_; option=GetImageOption(constImageInfo(),definition.c_str()); if (option) return(std::string(option)); return(std::string()); } void Magick::Image::deskew(const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=DeskewImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::despeckle(void) { MagickCore::Image *newImage; GetPPException; newImage=DespeckleImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::ImageType Magick::Image::determineType(void) const { ImageType image_type; GetPPException; image_type=GetImageType(constImage(),exceptionInfo); ThrowImageException; return(image_type); } void Magick::Image::display(void) { DisplayImages(imageInfo(),image()); } void Magick::Image::distort(const DistortImageMethod method_, const size_t number_arguments_,const double *arguments_,const bool bestfit_) { MagickCore::Image 
*newImage; GetPPException; newImage=DistortImage(constImage(),method_,number_arguments_,arguments_, bestfit_ == true ? MagickTrue : MagickFalse,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::draw(const Magick::Drawable &drawable_) { DrawingWand *wand; modifyImage(); wand=AcquireDrawingWand(options()->drawInfo(),image()); if(wand) { drawable_.operator()(wand); if (constImage()->exception.severity == UndefinedException) DrawRender(wand); wand=DestroyDrawingWand(wand); } throwImageException(); } void Magick::Image::draw(const std::list<Magick::Drawable> &drawable_) { DrawingWand *wand; modifyImage(); wand=AcquireDrawingWand(options()->drawInfo(),image()); if(wand) { for (std::list<Magick::Drawable>::const_iterator p = drawable_.begin(); p != drawable_.end(); p++) { p->operator()(wand); if (constImage()->exception.severity != UndefinedException) break; } if (constImage()->exception.severity == UndefinedException) DrawRender(wand); wand=DestroyDrawingWand(wand); } throwImageException(); } void Magick::Image::edge(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=EdgeImage(constImage(),radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::emboss(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=EmbossImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::encipher(const std::string &passphrase_) { modifyImage(); GetPPException; EncipherImage(image(),passphrase_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::enhance(void) { MagickCore::Image *newImage; GetPPException; newImage=EnhanceImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::equalize(void) { modifyImage(); EqualizeImage(image()); throwImageException(); } void Magick::Image::erase(void) { modifyImage(); (void) 
SetImageBackgroundColor(image()); throwImageException(); } void Magick::Image::extent(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo extentInfo; modifyImage(); GetPPException; extentInfo=geometry_; extentInfo.x=geometry_.xOff(); extentInfo.y=geometry_.yOff(); newImage=ExtentImage(constImage(),&extentInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); extent(geometry_); } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { image()->gravity=gravity_; backgroundColor(backgroundColor_); extent(geometry_,gravity_); } void Magick::Image::extent(const Geometry &geometry_, const GravityType gravity_) { RectangleInfo geometry; SetGeometry(image(),&geometry); geometry.width=geometry_.width(); geometry.height=geometry_.height(); GravityAdjustGeometry(image()->columns,image()->rows,gravity_,&geometry); extent(geometry); } void Magick::Image::flip(void) { MagickCore::Image *newImage; GetPPException; newImage=FlipImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,false); } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_,const bool invert_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,invert_); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_) { floodFillColor(x_,y_,fillColor_,false); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const bool invert_) { PixelPacket pixel; modifyImage(); pixel=pixelColor(x_,y_); floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_); } void Magick::Image::floodFillColor(const 
Geometry &point_, const Magick::Color &fillColor_,const Magick::Color &borderColor_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,false); } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_,const Magick::Color &borderColor_, const bool invert_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,invert_); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const Magick::Color &borderColor_) { floodFillColor(x_,y_,fillColor_,borderColor_,false); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const Magick::Color &borderColor_, const bool invert_) { PixelPacket pixel; modifyImage(); pixel=static_cast<PixelPacket>(borderColor_); floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_); } void Magick::Image::floodFillOpacity(const ssize_t x_,const ssize_t y_, const unsigned int opacity_,const bool invert_) { MagickPixelPacket target; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(constImage(),&target); pixel=static_cast<PixelPacket>(pixelColor(x_,y_)); target.red=pixel.red; target.green=pixel.green; target.blue=pixel.blue; target.opacity=opacity_; (void) FloodfillPaintImage(image(),OpacityChannel,options()->drawInfo(), &target,x_,y_,(MagickBooleanType)invert_); throwImageException(); } void Magick::Image::floodFillOpacity(const ssize_t x_,const ssize_t y_, const unsigned int opacity_,const PaintMethod method_) { floodFillOpacity(x_,y_,opacity_,method_ == FloodfillMethod ? 
false : true); } void Magick::Image::floodFillOpacity(const ::ssize_t x_,const ::ssize_t y_, const unsigned int opacity_,const Color &target_,const bool invert_) { MagickPixelPacket target; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(constImage(),&target); pixel=static_cast<PixelPacket>(target_); target.red=pixel.red; target.green=pixel.green; target.blue=pixel.blue; target.opacity=opacity_; (void) FloodfillPaintImage(image(),OpacityChannel,options()->drawInfo(), &target,x_,y_,(MagickBooleanType)invert_); throwImageException(); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,false); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_,const bool invert_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,invert_); } void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_) { floodFillTexture(x_,y_,texture_,false); } void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const bool invert_) { PixelPacket pixel; modifyImage(); pixel=static_cast<PixelPacket>(pixelColor(x_,y_)); floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_,const Magick::Color &borderColor_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,false); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_,const Magick::Color &borderColor_, const bool invert_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,invert_); } void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const Magick::Color &borderColor_) { floodFillTexture(x_,y_,texture_,borderColor_,false); } void 
Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const Magick::Color &borderColor_, const bool invert_) { PixelPacket pixel; modifyImage(); pixel=static_cast<PixelPacket>(borderColor_); floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_); } void Magick::Image::flop(void) { MagickCore::Image *newImage; GetPPException; newImage=FlopImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fontTypeMetrics(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); if (GetTypeMetrics(image(),drawInfo,&(metrics->_typeMetric)) == MagickFalse) throwImageException(); drawInfo->text=0; } void Magick::Image::fontTypeMetricsMultiline(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); GetMultilineTypeMetrics(image(),drawInfo,&(metrics->_typeMetric)); drawInfo->text=0; } void Magick::Image::frame(const Geometry &geometry_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(geometry_.width()); info.y=static_cast<ssize_t>(geometry_.height()); info.width=columns() + ( static_cast<size_t>(info.x) << 1 ); info.height=rows() + ( static_cast<size_t>(info.y) << 1 ); info.outer_bevel=geometry_.xOff(); info.inner_bevel=geometry_.yOff(); GetPPException; newImage=FrameImage(constImage(),&info,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::frame(const size_t width_,const size_t height_, const ssize_t innerBevel_,const ssize_t outerBevel_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(width_); info.y=static_cast<ssize_t>(height_); info.width=columns() + ( static_cast<size_t>(info.x) << 1 ); info.height=rows() + ( static_cast<size_t>(info.y) << 1 ); info.outer_bevel=static_cast<ssize_t>(outerBevel_); 
info.inner_bevel=static_cast<ssize_t>(innerBevel_); GetPPException; newImage=FrameImage(constImage(),&info,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression) { MagickCore::Image *newImage; GetPPException; newImage=FxImageChannel(constImage(),DefaultChannels,expression.c_str(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression, const Magick::ChannelType channel) { MagickCore::Image *newImage; GetPPException; newImage=FxImageChannel(constImage(),channel,expression.c_str(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::gamma(const double gamma_) { char gamma[MaxTextExtent + 1]; FormatLocaleString(gamma,MaxTextExtent,"%3.6f",gamma_); modifyImage(); GammaImage(image(),gamma); } void Magick::Image::gamma(const double gammaRed_,const double gammaGreen_, const double gammaBlue_) { char gamma[MaxTextExtent + 1]; FormatLocaleString(gamma,MaxTextExtent,"%3.6f/%3.6f/%3.6f/",gammaRed_, gammaGreen_,gammaBlue_); modifyImage(); GammaImage(image(),gamma); throwImageException(); } void Magick::Image::gaussianBlur(const double width_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=GaussianBlurImage(constImage(),width_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::gaussianBlurChannel(const ChannelType channel_, const double width_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=GaussianBlurImageChannel(constImage(),channel_,width_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } const Magick::IndexPacket* Magick::Image::getConstIndexes(void) const { const Magick::IndexPacket *result; result=GetVirtualIndexQueue(constImage()); if (!result) throwImageException(); return(result); } const Magick::PixelPacket* Magick::Image::getConstPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const 
size_t rows_) const { const PixelPacket *result; GetPPException; result=GetVirtualPixels(constImage(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } Magick::IndexPacket *Magick::Image::getIndexes(void) { Magick::IndexPacket *result; result=GetAuthenticIndexQueue(image()); if(!result) throwImageException(); return(result); } Magick::PixelPacket *Magick::Image::getPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_) { PixelPacket *result; modifyImage(); GetPPException; result=GetAuthenticPixels(image(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::grayscale(const PixelIntensityMethod method_) { modifyImage(); (void) GrayscaleImage(image(),method_); throwImageException(); } void Magick::Image::haldClut(const Image &clutImage_) { modifyImage(); (void) HaldClutImage(image(),clutImage_.constImage()); throwImageException(); } void Magick::Image::houghLine(const size_t width_,const size_t height_, const size_t threshold_) { MagickCore::Image *newImage; GetPPException; newImage=HoughLineImage(constImage(),width_,height_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::implode(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=ImplodeImage(constImage(),factor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::inverseFourierTransform(const Image &phase_) { inverseFourierTransform(phase_,true); } void Magick::Image::inverseFourierTransform(const Image &phase_, const bool magnitude_) { MagickCore::Image *newImage; GetPPException; newImage=InverseFourierTransformImage(constImage(),phase_.constImage(), magnitude_ == true ? 
MagickTrue : MagickFalse,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwahara(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwaharaChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=KuwaharaImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::level(const double black_point,const double white_point, const double gamma) { char levels[MaxTextExtent]; modifyImage(); FormatLocaleString(levels,MaxTextExtent,"%g,%g,%g",black_point,white_point, gamma); (void) LevelImage(image(),levels); throwImageException(); } void Magick::Image::levelChannel(const Magick::ChannelType channel, const double black_point,const double white_point,const double gamma) { modifyImage(); (void) LevelImageChannel(image(),channel,black_point,white_point,gamma); throwImageException(); } void Magick::Image::levelColors(const Color &blackColor_, const Color &whiteColor_,const bool invert_) { MagickPixelPacket black, white; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(image(),&black); pixel=static_cast<PixelPacket>(blackColor_); black.red=pixel.red; black.green=pixel.green; black.blue=pixel.blue; black.opacity=pixel.opacity; GetMagickPixelPacket(image(),&white); pixel=static_cast<PixelPacket>(whiteColor_); white.red=pixel.red; white.green=pixel.green; white.blue=pixel.blue; white.opacity=pixel.opacity; (void) LevelColorsImage(image(),&black,&white, invert_ == true ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::levelColorsChannel(const ChannelType channel_, const Color &blackColor_,const Color &whiteColor_,const bool invert_) { MagickPixelPacket black, white; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(image(),&black); pixel=static_cast<PixelPacket>(blackColor_); black.red=pixel.red; black.green=pixel.green; black.blue=pixel.blue; black.opacity=pixel.opacity; GetMagickPixelPacket(image(),&white); pixel=static_cast<PixelPacket>(whiteColor_); white.red=pixel.red; white.green=pixel.green; white.blue=pixel.blue; white.opacity=pixel.opacity; (void) LevelColorsImageChannel(image(),channel_,&black,&white, invert_ == true ? MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::levelize(const double blackPoint_,const double whitePoint_, const double gamma_) { modifyImage(); (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_); throwImageException(); } void Magick::Image::levelizeChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_,const double gamma_) { modifyImage(); (void) LevelizeImageChannel(image(),channel_,blackPoint_,whitePoint_,gamma_); throwImageException(); } void Magick::Image::linearStretch(const double blackPoint_, const double whitePoint_) { modifyImage(); LinearStretchImage(image(),blackPoint_,whitePoint_); throwImageException(); } void Magick::Image::liquidRescale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=LiquidRescaleImage(constImage(),width,height,x,y,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::localContrast(const double radius_,const double strength_) { MagickCore::Image *newImage; GetPPException; newImage=LocalContrastImage(constImage(),radius_,strength_,exceptionInfo); replaceImage(newImage); 
ThrowImageException; } void Magick::Image::magnify(void) { MagickCore::Image *newImage; GetPPException; newImage=MagnifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::map(const Image &mapImage_,const bool dither_) { modifyImage(); options()->quantizeDither(dither_); RemapImage(options()->quantizeInfo(),image(),mapImage_.constImage()); throwImageException(); } void Magick::Image::matteFloodfill(const Color &target_, const unsigned int opacity_,const ssize_t x_,const ssize_t y_, const Magick::PaintMethod method_) { floodFillOpacity(x_,y_,opacity_,target_, method_ == FloodfillMethod ? false : true); } void Magick::Image::medianFilter(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),MedianStatistic,(size_t) radius_, (size_t) radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::mergeLayers(const ImageLayerMethod layerMethod_) { MagickCore::Image *newImage; GetPPException; newImage=MergeImageLayers(image(),layerMethod_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::minify(void) { MagickCore::Image *newImage; GetPPException; newImage=MinifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::modulate(const double brightness_,const double saturation_, const double hue_) { char modulate[MaxTextExtent + 1]; FormatLocaleString(modulate,MaxTextExtent,"%3.6f,%3.6f,%3.6f",brightness_, saturation_,hue_); modifyImage(); ModulateImage(image(),modulate); throwImageException(); } Magick::ImageMoments Magick::Image::moments(void) const { return(ImageMoments(*this)); } void Magick::Image::morphology(const MorphologyMethod method_, const std::string kernel_,const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; kernel=AcquireKernelInfo(kernel_.c_str()); if (kernel == (KernelInfo *)NULL) throwExceptionExplicit(OptionError,"Unable to 
parse kernel."); GetPPException; newImage=MorphologyImage(constImage(),method_,iterations_,kernel, exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void Magick::Image::morphology(const MorphologyMethod method_, const KernelInfoType kernel_,const std::string arguments_, const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(OptionError,"Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphology(method_,kernel,iterations_); } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const std::string kernel_, const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; kernel=AcquireKernelInfo(kernel_.c_str()); if (kernel == (KernelInfo *)NULL) { throwExceptionExplicit(OptionError,"Unable to parse kernel."); return; } GetPPException; newImage=MorphologyImageChannel(constImage(),channel_,method_,iterations_, kernel,exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const KernelInfoType kernel_, const std::string arguments_,const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(OptionError,"Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphologyChannel(channel_,method_,kernel,iterations_); } void Magick::Image::motionBlur(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=MotionBlurImage(constImage(),radius_,sigma_,angle_,exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::negate(const bool grayscale_) { modifyImage(); NegateImage(image(),(MagickBooleanType) grayscale_); throwImageException(); } void Magick::Image::negateChannel(const ChannelType channel_, const bool grayscale_) { modifyImage(); NegateImageChannel(image(),channel_,(MagickBooleanType) grayscale_); throwImageException(); } void Magick::Image::normalize(void) { modifyImage(); NormalizeImage(image()); throwImageException(); } void Magick::Image::oilPaint(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=OilPaintImage(constImage(),radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::opacity(const unsigned int opacity_) { modifyImage(); SetImageOpacity(image(),opacity_); } void Magick::Image::opaque(const Color &opaqueColor_,const Color &penColor_, const bool invert_) { MagickPixelPacket opaque, pen; std::string opaqueColor, penColor; if (!opaqueColor_.isValid()) throwExceptionExplicit(OptionError,"Opaque color argument is invalid"); if (!penColor_.isValid()) throwExceptionExplicit(OptionError,"Pen color argument is invalid"); opaqueColor=opaqueColor_; penColor=penColor_; (void) QueryMagickColor(opaqueColor.c_str(),&opaque,&image()->exception); (void) QueryMagickColor(penColor.c_str(),&pen,&image()->exception); modifyImage(); OpaquePaintImage(image(),&opaque,&pen,invert_ ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::orderedDither(std::string thresholdMap_) { modifyImage(); GetPPException; (void) OrderedPosterizeImage(image(),thresholdMap_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::orderedDitherChannel(const ChannelType channel_, std::string thresholdMap_) { modifyImage(); GetPPException; (void) OrderedPosterizeImageChannel(image(),channel_,thresholdMap_.c_str(), exceptionInfo); ThrowImageException; } void Magick::Image::perceptible(const double epsilon_) { modifyImage(); PerceptibleImage(image(),epsilon_); throwImageException(); } void Magick::Image::perceptibleChannel(const ChannelType channel_, const double epsilon_) { modifyImage(); PerceptibleImageChannel(image(),channel_,epsilon_); throwImageException(); } void Magick::Image::ping(const Blob& blob_) { MagickCore::Image *newImage; GetPPException; newImage=PingBlob(imageInfo(),blob_.data(),blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::ping(const std::string &imageSpec_) { MagickCore::Image *newImage; GetPPException; options()->fileName(imageSpec_); newImage=PingImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::pixelColor(const ssize_t x_,const ssize_t y_, const Color &color_) { // Test arguments to ensure they are within the image. 
if (y_ > (ssize_t) rows() || x_ > (ssize_t) columns()) throwExceptionExplicit(OptionError,"Access outside of image boundary"); modifyImage(); // Set image to DirectClass classType(DirectClass); // Get pixel view Pixels pixels(*this); // Set pixel value *(pixels.get(x_,y_,1,1))=color_; // Tell ImageMagick that pixels have been updated pixels.sync(); } Magick::Color Magick::Image::pixelColor(const ssize_t x_, const ssize_t y_) const { ClassType storage_class; storage_class=classType(); if (storage_class == DirectClass) { const PixelPacket *pixel; pixel=getConstPixels(x_,y_,1,1); if (pixel) return(Color(*pixel)); } else if (storage_class == PseudoClass) { const IndexPacket *indexes; indexes=getConstIndexes(); if(indexes) return(colorMap((size_t) *indexes)); } return(Color()); // invalid } void Magick::Image::polaroid(const std::string &caption_,const double angle_) { MagickCore::Image *newImage; GetPPException; (void) SetImageProperty(image(),"Caption",caption_.c_str()); newImage=PolaroidImage(constImage(),options()->drawInfo(),angle_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::posterize(const size_t levels_,const bool dither_) { modifyImage(); PosterizeImage(image(),levels_,(MagickBooleanType) dither_); throwImageException(); } void Magick::Image::posterizeChannel(const ChannelType channel_, const size_t levels_,const bool dither_) { modifyImage(); PosterizeImageChannel(image(),channel_,levels_, (MagickBooleanType) dither_); throwImageException(); } void Magick::Image::process(std::string name_,const ssize_t argc, const char **argv) { size_t status; modifyImage(); status=InvokeDynamicImageFilter(name_.c_str(),&image(),argc, argv, &image()->exception); if (status == false) throwImageException(); } void Magick::Image::profile(const std::string name_, const Magick::Blob &profile_) { ssize_t result; modifyImage(); result=ProfileImage(image(),name_.c_str(),(unsigned char *)profile_.data(), profile_.length(),MagickTrue); if (!result) 
throwImageException(); } Magick::Blob Magick::Image::profile(const std::string name_) const { const StringInfo *profile; profile=GetImageProfile(constImage(),name_.c_str()); if (profile == (StringInfo *) NULL) return(Blob()); return(Blob((void*) GetStringInfoDatum(profile),GetStringInfoLength( profile))); } void Magick::Image::quantize(const bool measureError_) { modifyImage(); if (measureError_) options()->quantizeInfo()->measure_error=MagickTrue; else options()->quantizeInfo()->measure_error=MagickFalse; QuantizeImage(options()->quantizeInfo(),image()); throwImageException(); } void Magick::Image::quantumOperator(const ChannelType channel_, const MagickEvaluateOperator operator_,double rvalue_) { GetPPException; EvaluateImageChannel(image(),channel_,operator_,rvalue_,exceptionInfo); ThrowImageException; } void Magick::Image::quantumOperator(const ChannelType channel_, const MagickFunction function_,const size_t number_parameters_, const double *parameters_) { GetPPException; FunctionImageChannel(image(),channel_,function_,number_parameters_, parameters_,exceptionInfo); ThrowImageException; } void Magick::Image::quantumOperator(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const ChannelType channel_, const MagickEvaluateOperator operator_,const double rvalue_) { MagickCore::Image *cropImage; RectangleInfo geometry; GetPPException; geometry.width=columns_; geometry.height=rows_; geometry.x=x_; geometry.y=y_; cropImage=CropImage(image(),&geometry,exceptionInfo); EvaluateImageChannel(cropImage,channel_,operator_,rvalue_,exceptionInfo); (void) CompositeImage(image(),image()->matte != MagickFalse ? OverCompositeOp : CopyCompositeOp,cropImage,geometry.x, geometry.y); cropImage=DestroyImageList(cropImage); ThrowImageException; } void Magick::Image::raise(const Geometry &geometry_,const bool raisedFlag_) { RectangleInfo raiseInfo; raiseInfo=geometry_; modifyImage(); RaiseImage(image(),&raiseInfo,raisedFlag_ == true ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::randomThreshold( const Geometry &thresholds_ ) { GetPPException; modifyImage(); (void) RandomThresholdImage(image(),static_cast<std::string>( thresholds_).c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::randomThresholdChannel(const Geometry &thresholds_, const ChannelType channel_) { GetPPException; modifyImage(); (void) RandomThresholdImageChannel(image(),channel_,static_cast<std::string>( thresholds_).c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::read(const Blob &blob_) { MagickCore::Image *newImage; GetPPException; newImage=BlobToImage(imageInfo(),static_cast<const void *>(blob_.data()), blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::read(const Blob &blob_,const Geometry &size_) { size(size_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_) { size(size_); depth(depth_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) { size(size_); depth(depth_); magick(magick_); fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const std::string &magick_) { size(size_); magick(magick_); fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Geometry &size_,const std::string &imageSpec_) { size(size_); read(imageSpec_); } void Magick::Image::read(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) { MagickCore::Image *newImage; GetPPException; newImage=ConstituteImage(width_,height_,map_.c_str(),type_,pixels_, exceptionInfo); replaceImage(newImage); ThrowImageException; if (newImage) throwException(&newImage->exception,quiet()); } void Magick::Image::read(const std::string &imageSpec_) { MagickCore::Image *newImage; options()->fileName(imageSpec_); GetPPException; 
newImage=ReadImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::readPixels(const Magick::QuantumType quantum_, const unsigned char *source_) { QuantumInfo *quantum_info; GetPPException; quantum_info=AcquireQuantumInfo(imageInfo(),image()); ImportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,source_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::reduceNoise(const double order_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),NonpeakStatistic,(size_t) order_, (size_t) order_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::repage() { modifyImage(); options()->page(Geometry()); image()->page.width = 0; image()->page.height = 0; image()->page.x = 0; image()->page.y = 0; } void Magick::Image::resample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t width=columns(), height=rows(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x, &y,&width, &height); GetPPException; newImage=ResampleImage(constImage(),width,height,image()->filter,1.0, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::resize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t width=columns(), height=rows(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x, &y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,1.0, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const Geometry &roll_) { MagickCore::Image *newImage; ssize_t xOff=roll_.xOff(), yOff=roll_.yOff(); if (roll_.xNegative()) xOff=0-xOff; if (roll_.yNegative()) yOff=0-yOff; GetPPException; newImage=RollImage(constImage(),xOff,yOff,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const size_t columns_,const 
size_t rows_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),static_cast<ssize_t>(columns_), static_cast<ssize_t>(rows_),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotate(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=RotateImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlur(const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlurChannel(const ChannelType channel_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImageChannel(constImage(),channel_,angle_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=SampleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::scale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ScaleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::segment(const double clusterThreshold_, const double smoothingThreshold_) { modifyImage(); SegmentImage(image(),options()->quantizeColorSpace(), (MagickBooleanType) options()->verbose(),clusterThreshold_, smoothingThreshold_); throwImageException(); SyncImage(image()); throwImageException(); } void Magick::Image::selectiveBlur(const double radius_,const double sigma_, 
const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::selectiveBlurChannel(const ChannelType channel_, const double radius_,const double sigma_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImageChannel(constImage(),channel_,radius_,sigma_, threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::separate(const ChannelType channel_) const { MagickCore::Image *image; GetPPException; image=SeparateImage(constImage(),channel_,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::sepiaTone(const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SepiaToneImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::PixelPacket *Magick::Image::setPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_) { PixelPacket *result; modifyImage(); GetPPException; result=QueueAuthenticPixels(image(),x_, y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::shade(const double azimuth_,const double elevation_, const bool colorShading_) { MagickCore::Image *newImage; GetPPException; newImage=ShadeImage(constImage(),colorShading_ == true ? 
MagickTrue : MagickFalse,azimuth_,elevation_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shadow(const double percent_opacity_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=ShadowImage(constImage(),percent_opacity_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpenChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shave(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo shaveInfo=geometry_; GetPPException; newImage=ShaveImage(constImage(),&shaveInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shear(const double xShearAngle_,const double yShearAngle_) { MagickCore::Image *newImage; GetPPException; newImage=ShearImage(constImage(),xShearAngle_,yShearAngle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sigmoidalContrast(const size_t sharpen_, const double contrast,const double midpoint) { modifyImage(); (void) SigmoidalContrastImageChannel(image(),DefaultChannels, (MagickBooleanType) sharpen_,contrast,midpoint); throwImageException(); } std::string Magick::Image::signature(const bool force_) const { const char *property; Lock lock(&_imgRef->_mutexLock); // Re-calculate image signature if necessary if (force_ || !GetImageProperty(constImage(), "Signature") || constImage()->taint) SignatureImage(const_cast<MagickCore::Image *>(constImage())); 
property=GetImageProperty(constImage(),"Signature"); return(std::string(property)); } void Magick::Image::sketch(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=SketchImage(constImage(),radius_,sigma_,angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::solarize(const double factor_) { modifyImage(); SolarizeImage(image(),factor_); throwImageException(); } void Magick::Image::sparseColor(const ChannelType channel, const SparseColorMethod method,const size_t number_arguments, const double *arguments) { MagickCore::Image *newImage; GetPPException; newImage=SparseColorImage(constImage(),channel,method,number_arguments, arguments,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo spliceInfo=geometry_; GetPPException; newImage=SpliceImage(constImage(),&spliceInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); splice(geometry_); } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { backgroundColor(backgroundColor_); image()->gravity=gravity_; splice(geometry_); } void Magick::Image::spread(const size_t amount_) { MagickCore::Image *newImage; GetPPException; newImage=SpreadImage(constImage(),amount_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::statistics(ImageStatistics *statistics) const { double maximum, minimum; GetPPException; (void) GetImageChannelRange(constImage(),RedChannel,&minimum,&maximum, exceptionInfo); statistics->red.minimum=minimum; statistics->red.maximum=maximum; (void) GetImageChannelMean(constImage(),RedChannel,&statistics->red.mean, &statistics->red.standard_deviation,exceptionInfo); (void) 
GetImageChannelKurtosis(constImage(),RedChannel, &statistics->red.kurtosis,&statistics->red.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),GreenChannel,&minimum,&maximum, exceptionInfo); statistics->green.minimum=minimum; statistics->green.maximum=maximum; (void) GetImageChannelMean(constImage(),GreenChannel,&statistics->green.mean, &statistics->green.standard_deviation,exceptionInfo); (void) GetImageChannelKurtosis(constImage(),GreenChannel, &statistics->green.kurtosis,&statistics->green.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),BlueChannel,&minimum,&maximum, exceptionInfo); statistics->blue.minimum=minimum; statistics->blue.maximum=maximum; (void) GetImageChannelMean(constImage(),BlueChannel,&statistics->blue.mean, &statistics->blue.standard_deviation,exceptionInfo); (void) GetImageChannelKurtosis(constImage(),BlueChannel, &statistics->blue.kurtosis,&statistics->blue.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),OpacityChannel,&minimum,&maximum, exceptionInfo); statistics->opacity.minimum=minimum; statistics->opacity.maximum=maximum; (void) GetImageChannelMean(constImage(),OpacityChannel, &statistics->opacity.mean,&statistics->opacity.standard_deviation, exceptionInfo); (void) GetImageChannelKurtosis(constImage(),OpacityChannel, &statistics->opacity.kurtosis,&statistics->opacity.skewness, exceptionInfo); ThrowImageException; } void Magick::Image::stegano(const Image &watermark_) { MagickCore::Image *newImage; GetPPException; newImage=SteganoImage(constImage(),watermark_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::stereo(const Image &rightImage_) { MagickCore::Image *newImage; GetPPException; newImage=StereoImage(constImage(),rightImage_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::strip(void) { modifyImage(); StripImage(image()); throwImageException(); } Magick::Image 
Magick::Image::subImageSearch(const Image &reference_, const MetricType metric_,Geometry *offset_,double *similarityMetric_, const double similarityThreshold) { char artifact[MaxTextExtent]; MagickCore::Image *newImage; RectangleInfo offset; modifyImage(); (void) FormatLocaleString(artifact,MaxTextExtent,"%g",similarityThreshold); (void) SetImageArtifact(image(),"compare:similarity-threshold",artifact); GetPPException; newImage=SimilarityMetricImage(image(),reference_.constImage(),metric_, &offset,similarityMetric_,exceptionInfo); ThrowImageException; if (offset_ != (Geometry *) NULL) *offset_=offset; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::swirl(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=SwirlImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::syncPixels(void) { GetPPException; (void) SyncAuthenticPixels(image(),exceptionInfo); ThrowImageException; } void Magick::Image::texture(const Image &texture_) { modifyImage(); TextureImage(image(),texture_.constImage()); throwImageException(); } void Magick::Image::threshold(const double threshold_) { modifyImage(); BilevelImage(image(),threshold_); throwImageException(); } void Magick::Image::thumbnail(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ThumbnailImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::tint(const std::string opacity_) { MagickCore::Image *newImage; GetPPException; newImage=TintImage(constImage(),opacity_.c_str(),constOptions()->fillColor(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transform(const Geometry &imageGeometry_) { modifyImage(); 
TransformImage(&(image()),0,std::string(imageGeometry_).c_str()); throwImageException(); } void Magick::Image::transform(const Geometry &imageGeometry_, const Geometry &cropGeometry_) { modifyImage(); TransformImage(&(image()),std::string(cropGeometry_).c_str(), std::string(imageGeometry_).c_str()); throwImageException(); } void Magick::Image::transformOrigin(const double x_,const double y_) { modifyImage(); options()->transformOrigin(x_,y_); } void Magick::Image::transformReset(void) { modifyImage(); options()->transformReset(); } void Magick::Image::transformScale(const double sx_,const double sy_) { modifyImage(); options()->transformScale(sx_,sy_); } void Magick::Image::transparent(const Color &color_) { MagickPixelPacket target; std::string color; if (!color_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); color=color_; (void) QueryMagickColor(std::string(color_).c_str(),&target, &image()->exception); modifyImage(); TransparentPaintImage(image(),&target,TransparentOpacity,MagickFalse); throwImageException(); } void Magick::Image::transparentChroma(const Color &colorLow_, const Color &colorHigh_) { MagickPixelPacket targetHigh, targetLow; std::string colorHigh, colorLow; if (!colorLow_.isValid() || !colorHigh_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); colorLow=colorLow_; colorHigh=colorHigh_; (void) QueryMagickColor(colorLow.c_str(),&targetLow,&image()->exception); (void) QueryMagickColor(colorHigh.c_str(),&targetHigh,&image()->exception); modifyImage(); TransparentPaintImageChroma(image(),&targetLow,&targetHigh, TransparentOpacity,MagickFalse); throwImageException(); } void Magick::Image::transpose(void) { MagickCore::Image *newImage; GetPPException; newImage=TransposeImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transverse(void) { MagickCore::Image *newImage; GetPPException; newImage=TransverseImage(constImage(),exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::trim(void) { MagickCore::Image *newImage; GetPPException; newImage=TrimImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::uniqueColors(void) const { MagickCore::Image *image; GetPPException; image=UniqueImageColors(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::unsharpmask(const double radius_,const double sigma_, const double amount_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::unsharpmaskChannel(const ChannelType channel_, const double radius_,const double sigma_,const double amount_, const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImageChannel(constImage(),channel_,radius_,sigma_, amount_,threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::vignette(const double radius_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=VignetteImage(constImage(),radius_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::wave(const double amplitude_,const double wavelength_) { MagickCore::Image *newImage; GetPPException; newImage=WaveImage(constImage(),amplitude_,wavelength_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::waveletDenoise(const double threshold_, const double softness_) { MagickCore::Image *newImage; GetPPException; newImage=WaveletDenoiseImage(constImage(),threshold_,softness_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::whiteThreshold(const std::string &threshold_) { 
modifyImage(); WhiteThresholdImage(image(),threshold_.c_str()); throwImageException(); } void Magick::Image::whiteThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; WhiteThresholdImageChannel(image(),channel_,threshold_.c_str(), exceptionInfo); ThrowImageException; } void Magick::Image::write(Blob *blob_) { size_t length=0; void *data; modifyImage(); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(Blob *blob_,const std::string &magick_) { size_t length=0; void *data; modifyImage(); magick(magick_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(Blob *blob_,const std::string &magick_, const size_t depth_) { size_t length=0; void *data; modifyImage(); magick(magick_); depth(depth_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const std::string &map_, const StorageType type_,void *pixels_) { GetPPException; ExportImagePixels(constImage(),x_,y_,columns_,rows_,map_.c_str(),type_, pixels_,exceptionInfo); ThrowImageException; } void Magick::Image::write(const std::string &imageSpec_) { modifyImage(); fileName(imageSpec_); WriteImage(constImageInfo(),image()); throwImageException(); } void Magick::Image::writePixels(const Magick::QuantumType quantum_, unsigned char *destination_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; 
ExportQuantumPixels(constImage(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,destination_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::zoom(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,image()->blur, exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image::Image(MagickCore::Image *image_) : _imgRef(new ImageRef(image_)) { } MagickCore::Image *&Magick::Image::image(void) { return(_imgRef->image()); } const MagickCore::Image *Magick::Image::constImage(void) const { return(_imgRef->image()); } MagickCore::ImageInfo *Magick::Image::imageInfo(void) { return(_imgRef->options()->imageInfo()); } const MagickCore::ImageInfo *Magick::Image::constImageInfo(void) const { return(_imgRef->options()->imageInfo()); } Magick::Options *Magick::Image::options(void) { return(_imgRef->options()); } const Magick::Options *Magick::Image::constOptions(void) const { return(_imgRef->options()); } MagickCore::QuantizeInfo *Magick::Image::quantizeInfo(void) { return(_imgRef->options()->quantizeInfo()); } const MagickCore::QuantizeInfo *Magick::Image::constQuantizeInfo(void) const { return(_imgRef->options()->quantizeInfo()); } void Magick::Image::modifyImage(void) { { Lock lock(&_imgRef->_mutexLock); if (_imgRef->_refCount == 1) return; } GetPPException; replaceImage(CloneImage(constImage(),0,0,MagickTrue,exceptionInfo)); ThrowImageException; return; } MagickCore::Image *Magick::Image::replaceImage(MagickCore::Image *replacement_) { MagickCore::Image *image; if (replacement_) image=replacement_; else image=AcquireImage(constImageInfo()); { Lock lock(&_imgRef->_mutexLock); if (_imgRef->_refCount == 1) { // We own the image, just replace it, and de-register 
_imgRef->image(image); } else { // We don't own the image, dereference and replace with copy --_imgRef->_refCount; _imgRef=new ImageRef(image,constOptions()); } } return(_imgRef->_image); } void Magick::Image::throwImageException(void) const { // Throw C++ exception while resetting Image exception to default state throwException(&const_cast<MagickCore::Image*>(constImage())->exception, quiet()); } void Magick::Image::read(MagickCore::Image *image, MagickCore::ExceptionInfo *exceptionInfo) { // Ensure that multiple image frames were not read. if (image != (MagickCore::Image *) NULL && image->next != (MagickCore::Image *) NULL) { MagickCore::Image *next; // Destroy any extra image frames next=image->next; image->next=(MagickCore::Image *) NULL; next->previous=(MagickCore::Image *) NULL; DestroyImageList(next); } replaceImage(image); if (exceptionInfo->severity == MagickCore::UndefinedException && image == (MagickCore::Image *) NULL) { (void) MagickCore::DestroyExceptionInfo(exceptionInfo); if (!quiet()) throwExceptionExplicit(MagickCore::ImageWarning, "No image was loaded."); } else { ThrowImageException; } if (image != (MagickCore::Image *) NULL) throwException(&image->exception,quiet()); } void Magick::Image::floodFill(const ssize_t x_,const ssize_t y_, const Magick::Image *fillPattern_,const Magick::Color &fill_, const MagickCore::PixelPacket *target_,const bool invert_) { Magick::Color fillColor; MagickCore::Image *fillPattern; MagickPixelPacket target; // Set drawing fill pattern or fill color fillColor=options()->fillColor(); fillPattern=(MagickCore::Image *)NULL; if (options()->fillPattern() != (MagickCore::Image *)NULL) { GetPPException; fillPattern=CloneImage(options()->fillPattern(),0,0,MagickTrue, exceptionInfo); ThrowImageException; } if (fillPattern_ == (Magick::Image *)NULL) { options()->fillPattern((MagickCore::Image *)NULL); options()->fillColor(fill_); } else options()->fillPattern(fillPattern_->constImage()); GetMagickPixelPacket(image(),&target); 
target.red=target_->red; target.green=target_->green; target.blue=target_->blue; (void) FloodfillPaintImage(image(),DefaultChannels,options()->drawInfo(), &target,static_cast<ssize_t>(x_),static_cast<ssize_t>(y_), (MagickBooleanType) invert_); options()->fillColor(fillColor); options()->fillPattern(fillPattern); throwImageException(); }
./CrossVul/dataset_final_sorted/CWE-416/cpp/good_2969_0
crossvul-cpp_data_bad_2969_0
// This may look like C code, but it is really -*- C++ -*- // // Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2003 // Copyright Dirk Lemstra 2013-2015 // // Implementation of Image // #define MAGICKCORE_IMPLEMENTATION 1 #define MAGICK_PLUSPLUS_IMPLEMENTATION 1 #include "Magick++/Include.h" #include <cstdlib> #include <string> #include <string.h> #include <errno.h> #include <math.h> using namespace std; #include "Magick++/Image.h" #include "Magick++/Functions.h" #include "Magick++/Pixels.h" #include "Magick++/Options.h" #include "Magick++/ImageRef.h" #include "Magick++/ResourceLimits.h" #define AbsoluteValue(x) ((x) < 0 ? -(x) : (x)) #define MagickPI 3.14159265358979323846264338327950288419716939937510 #define DegreesToRadians(x) (MagickPI*(x)/180.0) #define ThrowImageException ThrowPPException(quiet()) MagickPPExport const char *Magick::borderGeometryDefault="6x6+0+0"; MagickPPExport const char *Magick::frameGeometryDefault="25x25+6+6"; MagickPPExport const char *Magick::raiseGeometryDefault="6x6+0+0"; MagickPPExport int Magick::operator == (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels and signature are the same, then the image is identical return((left_.rows() == right_.rows()) && (left_.columns() == right_.columns()) && (left_.signature() == right_.signature())); } MagickPPExport int Magick::operator != (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ == right_)); } MagickPPExport int Magick::operator > (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ < right_) && (left_ != right_)); } MagickPPExport int Magick::operator < (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels are less, then image is smaller return((left_.rows() * left_.columns()) < (right_.rows() * right_.columns())); } MagickPPExport int Magick::operator >= (const Magick::Image &left_, const Magick::Image &right_) { return((left_ > right_) || (left_ == right_)); } MagickPPExport 
int Magick::operator <= (const Magick::Image &left_, const Magick::Image &right_) { return((left_ < right_) || (left_ == right_)); } Magick::Image::Image(void) : _imgRef(new ImageRef) { } Magick::Image::Image(const Blob &blob_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(blob_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Geometry &size_,const Color &color_) : _imgRef(new ImageRef) { // xc: prefix specifies an X11 color string std::string imageSpec("xc:"); imageSpec+=color_; try { quiet(true); // Set image size size(size_); // Initialize, Allocate and Read images read(imageSpec); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Image &image_) : _imgRef(image_._imgRef) { Lock lock(&_imgRef->_mutexLock); // Increase reference count ++_imgRef->_refCount; } 
Magick::Image::Image(const Image &image_,const Geometry &geometry_) : _imgRef(new ImageRef) { const RectangleInfo geometry=geometry_; OffsetInfo offset; MagickCore::Image *image; GetPPException; image=CloneImage(image_.constImage(),geometry_.width(),geometry_.height(), MagickTrue,exceptionInfo); replaceImage(image); _imgRef->options(new Options(*image_.constOptions())); offset.x=0; offset.y=0; (void) CopyImagePixels(image,image_.constImage(),&geometry,&offset, exceptionInfo); ThrowImageException; } Magick::Image::Image(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) : _imgRef(new ImageRef) { try { quiet(true); read(width_,height_,map_,type_,pixels_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const std::string &imageSpec_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(imageSpec_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::~Image() { bool doDelete=false; { Lock lock(&_imgRef->_mutexLock); if (--_imgRef->_refCount == 0) doDelete=true; } if (doDelete) delete _imgRef; _imgRef=0; } Magick::Image& Magick::Image::operator=(const Magick::Image &image_) { if (this != &image_) { bool doDelete=false; { Lock lock(&image_._imgRef->_mutexLock); ++image_._imgRef->_refCount; } { Lock lock(&_imgRef->_mutexLock); if (--_imgRef->_refCount == 0) doDelete=true; } if (doDelete) { // Delete old image reference with associated image and options. 
delete _imgRef; _imgRef=0; } // Use new image reference _imgRef=image_._imgRef; } return(*this); } void Magick::Image::adjoin(const bool flag_) { modifyImage(); options()->adjoin(flag_); } bool Magick::Image::adjoin(void) const { return(constOptions()->adjoin()); } void Magick::Image::antiAlias(const bool flag_) { modifyImage(); options()->antiAlias(flag_); } bool Magick::Image::antiAlias(void) const { return(constOptions()->antiAlias()); } void Magick::Image::animationDelay(const size_t delay_) { modifyImage(); image()->delay=delay_; } size_t Magick::Image::animationDelay(void) const { return(constImage()->delay); } void Magick::Image::animationIterations(const size_t iterations_) { modifyImage(); image()->iterations=iterations_; } size_t Magick::Image::animationIterations(void) const { return(constImage()->iterations); } void Magick::Image::attenuate(const double attenuate_) { char value[MaxTextExtent]; modifyImage(); FormatLocaleString(value,MaxTextExtent,"%.20g",attenuate_); (void) SetImageArtifact(image(),"attenuate",value); } void Magick::Image::backgroundColor(const Color &backgroundColor_) { modifyImage(); if (backgroundColor_.isValid()) image()->background_color=backgroundColor_; else image()->background_color=Color(); options()->backgroundColor(backgroundColor_); } Magick::Color Magick::Image::backgroundColor(void) const { return(constOptions()->backgroundColor()); } void Magick::Image::backgroundTexture(const std::string &backgroundTexture_) { modifyImage(); options()->backgroundTexture(backgroundTexture_); } std::string Magick::Image::backgroundTexture(void) const { return(constOptions()->backgroundTexture()); } size_t Magick::Image::baseColumns(void) const { return(constImage()->magick_columns); } std::string Magick::Image::baseFilename(void) const { return(std::string(constImage()->magick_filename)); } size_t Magick::Image::baseRows(void) const { return(constImage()->magick_rows); } void Magick::Image::blackPointCompensation(const bool flag_) { 
image()->black_point_compensation=(MagickBooleanType) flag_; } bool Magick::Image::blackPointCompensation(void) const { return(static_cast<bool>(constImage()->black_point_compensation)); } void Magick::Image::borderColor(const Color &borderColor_) { modifyImage(); if (borderColor_.isValid()) image()->border_color=borderColor_; else image()->border_color=Color(); options()->borderColor(borderColor_); } Magick::Color Magick::Image::borderColor(void) const { return(constOptions()->borderColor()); } Magick::Geometry Magick::Image::boundingBox(void) const { RectangleInfo bbox; GetPPException; bbox=GetImageBoundingBox(constImage(),exceptionInfo); ThrowImageException; return(Geometry(bbox)); } void Magick::Image::boxColor(const Color &boxColor_) { modifyImage(); options()->boxColor(boxColor_); } Magick::Color Magick::Image::boxColor(void) const { return(constOptions()->boxColor()); } void Magick::Image::cacheThreshold(const size_t threshold_) { ResourceLimits::memory((MagickSizeType) threshold_); } void Magick::Image::classType(const ClassType class_) { if (classType() == PseudoClass && class_ == DirectClass) { // Use SyncImage to synchronize the DirectClass pixels with the // color map and then set to DirectClass type. 
modifyImage(); SyncImage(image()); image()->colormap=(PixelPacket *)RelinquishMagickMemory( image()->colormap); image()->storage_class=static_cast<MagickCore::ClassType>(DirectClass); } else if (classType() == DirectClass && class_ == PseudoClass) { // Quantize to create PseudoClass color map modifyImage(); quantizeColors(MaxColormapSize); quantize(); image()->storage_class=static_cast<MagickCore::ClassType>(PseudoClass); } } void Magick::Image::clipMask(const Magick::Image &clipMask_) { modifyImage(); if (clipMask_.isValid()) SetImageClipMask(image(),clipMask_.constImage()); else SetImageClipMask(image(),0); } Magick::Image Magick::Image::clipMask(void) const { MagickCore::Image *image; GetPPException; image=GetImageClipMask(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::colorFuzz(const double fuzz_) { modifyImage(); image()->fuzz=fuzz_; options()->colorFuzz(fuzz_); } double Magick::Image::colorFuzz(void) const { return(constOptions()->colorFuzz()); } void Magick::Image::colorMapSize(const size_t entries_) { if (entries_ > MaxColormapSize) throwExceptionExplicit(OptionError, "Colormap entries must not exceed MaxColormapSize"); modifyImage(); (void) AcquireImageColormap(image(),entries_); } size_t Magick::Image::colorMapSize(void) const { if (!constImage()->colormap) throwExceptionExplicit(OptionError,"Image does not contain a colormap"); return(constImage()->colors); } void Magick::Image::colorSpace(const ColorspaceType colorSpace_) { if (image()->colorspace == colorSpace_) return; modifyImage(); TransformImageColorspace(image(),colorSpace_); throwImageException(); } Magick::ColorspaceType Magick::Image::colorSpace(void) const { return(constImage()->colorspace); } void Magick::Image::colorspaceType(const ColorspaceType colorSpace_) { modifyImage(); SetImageColorspace(image(),colorSpace_); throwImageException(); 
options()->colorspaceType(colorSpace_); } Magick::ColorspaceType Magick::Image::colorspaceType(void) const { return(constOptions()->colorspaceType()); } void Magick::Image::comment(const std::string &comment_) { modifyImage(); SetImageProperty(image(),"Comment",NULL); if (comment_.length() > 0) SetImageProperty(image(),"Comment",comment_.c_str()); throwImageException(); } std::string Magick::Image::comment(void) const { const char *value; value=GetImageProperty(constImage(),"Comment"); if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::compose(const CompositeOperator compose_) { image()->compose=compose_; } Magick::CompositeOperator Magick::Image::compose(void) const { return(constImage()->compose); } void Magick::Image::compressType(const CompressionType compressType_) { modifyImage(); image()->compression=compressType_; options()->compressType(compressType_); } Magick::CompressionType Magick::Image::compressType(void) const { return(constImage()->compression); } void Magick::Image::debug(const bool flag_) { modifyImage(); options()->debug(flag_); } bool Magick::Image::debug(void) const { return(constOptions()->debug()); } void Magick::Image::density(const Geometry &density_) { modifyImage(); options()->density(density_); if (density_.isValid()) { image()->x_resolution=density_.width(); if (density_.height() != 0) image()->y_resolution=density_.height(); else image()->y_resolution=density_.width(); } else { // Reset to default image()->x_resolution=0; image()->y_resolution=0; } } Magick::Geometry Magick::Image::density(void) const { if (isValid()) { ssize_t x_resolution=72, y_resolution=72; if (constImage()->x_resolution > 0.0) x_resolution=static_cast<ssize_t>(constImage()->x_resolution + 0.5); if (constImage()->y_resolution > 0.0) y_resolution=static_cast<ssize_t>(constImage()->y_resolution + 0.5); return(Geometry(x_resolution,y_resolution)); } return(constOptions()->density()); } void 
Magick::Image::depth(const size_t depth_) { size_t depth=depth_; if (depth > MAGICKCORE_QUANTUM_DEPTH) depth=MAGICKCORE_QUANTUM_DEPTH; modifyImage(); image()->depth=depth; options()->depth(depth); } size_t Magick::Image::depth(void) const { return(constImage()->depth); } std::string Magick::Image::directory(void) const { if (constImage()->directory) return(std::string(constImage()->directory)); throwExceptionExplicit(CorruptImageWarning, "Image does not contain a directory"); return(std::string()); } void Magick::Image::endian(const Magick::EndianType endian_) { modifyImage(); options()->endian(endian_); image()->endian=endian_; } Magick::EndianType Magick::Image::endian(void) const { return(constImage()->endian); } void Magick::Image::exifProfile(const Magick::Blob &exifProfile_) { if (exifProfile_.data() != 0) { StringInfo *exif_profile; modifyImage(); exif_profile=AcquireStringInfo(exifProfile_.length()); SetStringInfoDatum(exif_profile,(unsigned char *) exifProfile_.data()); (void) SetImageProfile(image(),"exif",exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } Magick::Blob Magick::Image::exifProfile(void) const { const StringInfo *exif_profile; exif_profile=GetImageProfile(constImage(),"exif"); if (exif_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(exif_profile),GetStringInfoLength( exif_profile))); } void Magick::Image::fileName(const std::string &fileName_) { modifyImage(); fileName_.copy(image()->filename,sizeof(image()->filename)-1); image()->filename[fileName_.length()]=0; // Null terminate options()->fileName(fileName_); } std::string Magick::Image::fileName(void) const { return(constOptions()->fileName()); } off_t Magick::Image::fileSize(void) const { return((off_t) GetBlobSize(constImage())); } void Magick::Image::fillColor(const Magick::Color &fillColor_) { modifyImage(); options()->fillColor(fillColor_); } Magick::Color Magick::Image::fillColor(void) const { return(constOptions()->fillColor()); } void 
Magick::Image::fillRule(const Magick::FillRule &fillRule_) { modifyImage(); options()->fillRule(fillRule_); } Magick::FillRule Magick::Image::fillRule(void) const { return(constOptions()->fillRule()); } void Magick::Image::fillPattern(const Image &fillPattern_) { modifyImage(); if(fillPattern_.isValid()) options()->fillPattern(fillPattern_.constImage()); else options()->fillPattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::fillPattern(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->fillPattern(); if(tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::filterType(const Magick::FilterTypes filterType_) { modifyImage(); image()->filter=filterType_; } Magick::FilterTypes Magick::Image::filterType(void) const { return(constImage()->filter); } void Magick::Image::font(const std::string &font_) { modifyImage(); options()->font(font_); } std::string Magick::Image::font(void) const { return(constOptions()->font()); } void Magick::Image::fontFamily(const std::string &family_) { modifyImage(); options()->fontFamily(family_); } std::string Magick::Image::fontFamily(void) const { return(constOptions()->fontFamily()); } void Magick::Image::fontPointsize(const double pointSize_) { modifyImage(); options()->fontPointsize(pointSize_); } double Magick::Image::fontPointsize(void) const { return(constOptions()->fontPointsize()); } std::string Magick::Image::format(void) const { const MagickInfo *magick_info; GetPPException; magick_info=GetMagickInfo(constImage()->magick,exceptionInfo); ThrowImageException; if ((magick_info != 0) && (*magick_info->description != '\0')) return(std::string(magick_info->description)); throwExceptionExplicit(CorruptImageWarning,"Unrecognized image magick type"); return(std::string()); } 
// NOTE(review): parameter is a font style, not a point size — the name
// pointSize_ looks like a copy/paste slip from fontPointsize(); confirm
// before renaming (C++ parameter names do not affect callers).
void Magick::Image::fontStyle(const StyleType pointSize_)
{
  modifyImage();
  options()->fontStyle(pointSize_);
}

Magick::StyleType Magick::Image::fontStyle(void) const
{
  return(constOptions()->fontStyle());
}

void Magick::Image::fontWeight(const size_t weight_)
{
  modifyImage();
  options()->fontWeight(weight_);
}

size_t Magick::Image::fontWeight(void) const
{
  return(constOptions()->fontWeight());
}

// Expand a percent-escape property expression (e.g. "%wx%h") against this
// image and return the interpreted text.  Any error raised during
// interpretation is surfaced via throwImageException().
std::string Magick::Image::formatExpression(const std::string expression)
{
  char
    *text;

  std::string
    text_string;

  modifyImage();
  text=InterpretImageProperties(constImageInfo(),image(),expression.c_str());
  if (text != (char *) NULL)
    {
      // Take a copy, then free the C string the core allocated for us.
      text_string=std::string(text);
      text=DestroyString(text);
    }
  throwImageException();
  return(text_string);
}

double Magick::Image::gamma(void) const
{
  return(constImage()->gamma);
}

Magick::Geometry Magick::Image::geometry(void) const
{
  if (constImage()->geometry)
    return(Geometry(constImage()->geometry));

  // No geometry string recorded on the image; warn and return a default.
  throwExceptionExplicit(OptionWarning,"Image does not contain a geometry");

  return(Geometry());
}

void Magick::Image::gifDisposeMethod(const size_t disposeMethod_)
{
  modifyImage();
  image()->dispose=(DisposeType) disposeMethod_;
}

size_t Magick::Image::gifDisposeMethod(void) const
{
  // FIXME: It would be better to return an enumeration
  return ((size_t) constImage()->dispose);
}

// Store the compare highlight color as the "highlight-color" artifact.
void Magick::Image::highlightColor(const Color color_)
{
  std::string
    value;

  value=color_;
  artifact("highlight-color",value);
}

void Magick::Image::iccColorProfile(const Magick::Blob &colorProfile_)
{
  // Delegates to the generic profile() setter under the "icc" key.
  profile("icc",colorProfile_);
}

Magick::Blob Magick::Image::iccColorProfile(void) const
{
  const StringInfo
    *color_profile;

  color_profile=GetImageProfile(constImage(),"icc");
  if (color_profile == (StringInfo *) NULL)
    return Blob();
  return(Blob(GetStringInfoDatum(color_profile),GetStringInfoLength(
    color_profile)));
}

void Magick::Image::interlaceType(const InterlaceType interlace_)
{
  // Keep both the live image and the options in sync.
  modifyImage();
  image()->interlace=interlace_;
  options()->interlaceType(interlace_);
}
Magick::InterlaceType Magick::Image::interlaceType(void) const { return constImage()->interlace; } void Magick::Image::interpolate(const InterpolatePixelMethod interpolate_) { modifyImage(); image()->interpolate=interpolate_; } Magick::InterpolatePixelMethod Magick::Image::interpolate(void) const { return constImage()->interpolate; } void Magick::Image::iptcProfile(const Magick::Blob &iptcProfile_) { modifyImage(); if (iptcProfile_.data() != 0) { StringInfo *iptc_profile; iptc_profile=AcquireStringInfo(iptcProfile_.length()); SetStringInfoDatum(iptc_profile,(unsigned char *) iptcProfile_.data()); (void) SetImageProfile(image(),"iptc",iptc_profile); iptc_profile=DestroyStringInfo(iptc_profile ); } } Magick::Blob Magick::Image::iptcProfile(void) const { const StringInfo *iptc_profile; iptc_profile=GetImageProfile(constImage(),"iptc"); if (iptc_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(iptc_profile),GetStringInfoLength( iptc_profile))); } bool Magick::Image::isOpaque(void) const { MagickBooleanType result; GetPPException; result=IsOpaqueImage(constImage(),exceptionInfo); ThrowImageException; return(result != MagickFalse ? true : false); } void Magick::Image::isValid(const bool isValid_) { if (!isValid_) { delete _imgRef; _imgRef = new ImageRef; } else if (!isValid()) { // Construct with single-pixel black image to make // image valid. This is an obvious hack. 
size(Geometry(1,1)); read("xc:black"); } } bool Magick::Image::isValid(void) const { return(rows() && columns()); } void Magick::Image::label(const std::string &label_) { modifyImage(); (void) SetImageProperty(image(),"Label",NULL); if (label_.length() > 0) (void) SetImageProperty(image(),"Label",label_.c_str()); throwImageException(); } std::string Magick::Image::label(void) const { const char *value; value=GetImageProperty(constImage(),"Label"); if (value) return(std::string(value)); return(std::string()); } void Magick::Image::lowlightColor(const Color color_) { std::string value; value=color_; artifact("lowlight-color",value); } void Magick::Image::magick(const std::string &magick_) { size_t length; modifyImage(); length=sizeof(image()->magick)-1; if (magick_.length() < length) length=magick_.length(); if (!magick_.empty()) magick_.copy(image()->magick,length); image()->magick[length]=0; options()->magick(magick_); } std::string Magick::Image::magick(void) const { if (*(constImage()->magick) != '\0') return(std::string(constImage()->magick)); return(constOptions()->magick()); } void Magick::Image::mask(const Magick::Image &mask_) { modifyImage(); if (mask_.isValid()) SetImageMask(image(),mask_.constImage()); else SetImageMask(image(),0); } Magick::Image Magick::Image::mask(void) const { MagickCore::Image *image; GetPPException; image=GetImageMask(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::matte(const bool matteFlag_) { modifyImage(); // If matte channel is requested, but image doesn't already have a // matte channel, then create an opaque matte channel. Likewise, if // the image already has a matte channel but a matte channel is not // desired, then set the matte channel to opaque. 
if ((matteFlag_ && !constImage()->matte) || (constImage()->matte && !matteFlag_)) SetImageOpacity(image(),OpaqueOpacity); image()->matte=(MagickBooleanType) matteFlag_; } bool Magick::Image::matte(void) const { if (constImage()->matte) return true; else return false; } void Magick::Image::matteColor(const Color &matteColor_) { modifyImage(); if (matteColor_.isValid()) { image()->matte_color=matteColor_; options()->matteColor(matteColor_); } else { // Set to default matte color Color tmpColor("#BDBDBD"); image()->matte_color=tmpColor; options()->matteColor(tmpColor); } } Magick::Color Magick::Image::matteColor(void) const { return(Color(constImage()->matte_color.red,constImage()->matte_color.green, constImage()->matte_color.blue)); } double Magick::Image::meanErrorPerPixel(void) const { return(constImage()->error.mean_error_per_pixel); } void Magick::Image::modulusDepth(const size_t depth_) { modifyImage(); SetImageDepth(image(),depth_); options()->depth(depth_); } size_t Magick::Image::modulusDepth(void) const { size_t depth; GetPPException; depth=GetImageDepth(constImage(),exceptionInfo); ThrowImageException; return(depth); } void Magick::Image::monochrome(const bool monochromeFlag_) { modifyImage(); options()->monochrome(monochromeFlag_); } bool Magick::Image::monochrome(void) const { return(constOptions()->monochrome()); } Magick::Geometry Magick::Image::montageGeometry(void) const { if (constImage()->montage) return(Magick::Geometry(constImage()->montage)); throwExceptionExplicit(CorruptImageWarning, "Image does not contain a montage"); return(Magick::Geometry()); } double Magick::Image::normalizedMaxError(void) const { return(constImage()->error.normalized_maximum_error); } double Magick::Image::normalizedMeanError(void) const { return (constImage()->error.normalized_mean_error); } void Magick::Image::orientation(const Magick::OrientationType orientation_) { modifyImage(); image()->orientation=orientation_; } Magick::OrientationType 
Magick::Image::orientation(void) const { return(constImage()->orientation); } void Magick::Image::page(const Magick::Geometry &pageSize_) { modifyImage(); options()->page(pageSize_); image()->page=pageSize_; } Magick::Geometry Magick::Image::page(void) const { return(Geometry(constImage()->page.width,constImage()->page.height, AbsoluteValue(constImage()->page.x),AbsoluteValue(constImage()->page.y), constImage()->page.x < 0 ? true : false, constImage()->page.y < 0 ? true : false)); } void Magick::Image::penColor(const Color &penColor_) { modifyImage(); options()->fillColor(penColor_); options()->strokeColor(penColor_); } Magick::Color Magick::Image::penColor(void) const { return(constOptions()->fillColor()); } void Magick::Image::penTexture(const Image &penTexture_) { modifyImage(); if(penTexture_.isValid()) options()->fillPattern(penTexture_.constImage()); else options()->fillPattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::penTexture(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->fillPattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::quality(const size_t quality_) { modifyImage(); image()->quality=quality_; options()->quality(quality_); } size_t Magick::Image::quality(void) const { return(constImage()->quality); } void Magick::Image::quantizeColors(const size_t colors_) { modifyImage(); options()->quantizeColors(colors_); } size_t Magick::Image::quantizeColors(void) const { return(constOptions()->quantizeColors()); } void Magick::Image::quantizeColorSpace( const Magick::ColorspaceType colorSpace_) { modifyImage(); options()->quantizeColorSpace(colorSpace_); } Magick::ColorspaceType Magick::Image::quantizeColorSpace(void) const { return(constOptions()->quantizeColorSpace()); } 
void Magick::Image::quantizeDither(const bool ditherFlag_) { modifyImage(); options()->quantizeDither(ditherFlag_); } bool Magick::Image::quantizeDither(void) const { return(constOptions()->quantizeDither()); } void Magick::Image::quantizeDitherMethod(const DitherMethod ditherMethod_) { modifyImage(); options()->quantizeDitherMethod(ditherMethod_); } MagickCore::DitherMethod Magick::Image::quantizeDitherMethod(void) const { return(constOptions()->quantizeDitherMethod()); } void Magick::Image::quantizeTreeDepth(const size_t treeDepth_) { modifyImage(); options()->quantizeTreeDepth(treeDepth_); } size_t Magick::Image::quantizeTreeDepth(void) const { return(constOptions()->quantizeTreeDepth()); } void Magick::Image::quiet(const bool quiet_) { modifyImage(); options()->quiet(quiet_); } bool Magick::Image::quiet(void) const { return(constOptions()->quiet()); } void Magick::Image::renderingIntent( const Magick::RenderingIntent renderingIntent_) { modifyImage(); image()->rendering_intent=renderingIntent_; } Magick::RenderingIntent Magick::Image::renderingIntent(void) const { return(static_cast<Magick::RenderingIntent>( constImage()->rendering_intent)); } void Magick::Image::resolutionUnits( const Magick::ResolutionType resolutionUnits_) { modifyImage(); image()->units=resolutionUnits_; options()->resolutionUnits(resolutionUnits_); } Magick::ResolutionType Magick::Image::resolutionUnits(void) const { return(static_cast<Magick::ResolutionType>(constImage()->units)); } void Magick::Image::scene(const size_t scene_) { modifyImage(); image()->scene=scene_; } size_t Magick::Image::scene(void) const { return(constImage()->scene); } void Magick::Image::size(const Geometry &geometry_) { modifyImage(); options()->size(geometry_); image()->rows=geometry_.height(); image()->columns=geometry_.width(); } Magick::Geometry Magick::Image::size(void) const { return(Magick::Geometry(constImage()->columns,constImage()->rows)); } void Magick::Image::strokeAntiAlias(const bool flag_) { 
modifyImage(); options()->strokeAntiAlias(flag_); } bool Magick::Image::strokeAntiAlias(void) const { return(constOptions()->strokeAntiAlias()); } void Magick::Image::strokeColor(const Magick::Color &strokeColor_) { std::string value; modifyImage(); options()->strokeColor(strokeColor_); value=strokeColor_; artifact("stroke",value); } Magick::Color Magick::Image::strokeColor(void) const { return(constOptions()->strokeColor()); } void Magick::Image::strokeDashArray(const double *strokeDashArray_) { modifyImage(); options()->strokeDashArray(strokeDashArray_); } const double *Magick::Image::strokeDashArray(void) const { return(constOptions()->strokeDashArray()); } void Magick::Image::strokeDashOffset(const double strokeDashOffset_) { modifyImage(); options()->strokeDashOffset(strokeDashOffset_); } double Magick::Image::strokeDashOffset(void) const { return(constOptions()->strokeDashOffset()); } void Magick::Image::strokeLineCap(const Magick::LineCap lineCap_) { modifyImage(); options()->strokeLineCap(lineCap_); } Magick::LineCap Magick::Image::strokeLineCap(void) const { return(constOptions()->strokeLineCap()); } void Magick::Image::strokeLineJoin(const Magick::LineJoin lineJoin_) { modifyImage(); options()->strokeLineJoin(lineJoin_); } Magick::LineJoin Magick::Image::strokeLineJoin(void) const { return(constOptions()->strokeLineJoin()); } void Magick::Image::strokeMiterLimit(const size_t strokeMiterLimit_) { modifyImage(); options()->strokeMiterLimit(strokeMiterLimit_); } size_t Magick::Image::strokeMiterLimit(void) const { return constOptions()->strokeMiterLimit(); } void Magick::Image::strokePattern(const Image &strokePattern_) { modifyImage(); if(strokePattern_.isValid()) options()->strokePattern(strokePattern_.constImage()); else options()->strokePattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::strokePattern(void) const { const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->strokePattern(); if (tmpTexture) { 
MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::strokeWidth(const double strokeWidth_) { char value[MaxTextExtent]; modifyImage(); options()->strokeWidth(strokeWidth_); FormatLocaleString(value,MaxTextExtent,"%.20g",strokeWidth_); (void) SetImageArtifact(image(),"strokewidth",value); } double Magick::Image::strokeWidth(void) const { return(constOptions()->strokeWidth()); } void Magick::Image::subImage(const size_t subImage_) { modifyImage(); options()->subImage(subImage_); } size_t Magick::Image::subImage(void) const { return(constOptions()->subImage()); } void Magick::Image::subRange(const size_t subRange_) { modifyImage(); options()->subRange(subRange_); } size_t Magick::Image::subRange(void) const { return(constOptions()->subRange()); } void Magick::Image::textDirection(DirectionType direction_) { modifyImage(); options()->textDirection(direction_); } Magick::DirectionType Magick::Image::textDirection(void) const { return(constOptions()->textDirection()); } void Magick::Image::textEncoding(const std::string &encoding_) { modifyImage(); options()->textEncoding(encoding_); } std::string Magick::Image::textEncoding(void) const { return(constOptions()->textEncoding()); } void Magick::Image::textGravity(GravityType gravity_) { modifyImage(); options()->textGravity(gravity_); } Magick::GravityType Magick::Image::textGravity(void) const { return(constOptions()->textGravity()); } void Magick::Image::textInterlineSpacing(double spacing_) { modifyImage(); options()->textInterlineSpacing(spacing_); } double Magick::Image::textInterlineSpacing(void) const { return(constOptions()->textInterlineSpacing()); } void Magick::Image::textInterwordSpacing(double spacing_) { modifyImage(); options()->textInterwordSpacing(spacing_); } double Magick::Image::textInterwordSpacing(void) const { return(constOptions()->textInterwordSpacing()); } 
void Magick::Image::textKerning(double kerning_)
{ modifyImage(); options()->textKerning(kerning_); }

double Magick::Image::textKerning(void) const
{ return(constOptions()->textKerning()); }

void Magick::Image::textUnderColor(const Color &underColor_)
{ modifyImage(); options()->textUnderColor(underColor_); }

Magick::Color Magick::Image::textUnderColor(void) const
{ return(constOptions()->textUnderColor()); }

void Magick::Image::tileName(const std::string &tileName_)
{ modifyImage(); options()->tileName(tileName_); }

std::string Magick::Image::tileName(void) const
{ return(constOptions()->tileName()); }

// Count of unique colors actually present in the image.
size_t Magick::Image::totalColors(void) const
{
  size_t colors;
  GetPPException;
  colors=GetNumberColors(constImage(),0,exceptionInfo);
  ThrowImageException;
  return(colors);
}

// Pending affine-transform parameters (consumed by a later transform call).
void Magick::Image::transformRotation(const double angle_)
{ modifyImage(); options()->transformRotation(angle_); }

void Magick::Image::transformSkewX(const double skewx_)
{ modifyImage(); options()->transformSkewX(skewx_); }

void Magick::Image::transformSkewY(const double skewy_)
{ modifyImage(); options()->transformSkewY(skewy_); }

// Image type: the setter also converts the pixel data immediately.
void Magick::Image::type(const Magick::ImageType type_)
{
  modifyImage();
  options()->type(type_);
  SetImageType(image(),type_);
}

// Prefer an explicitly requested type, then the image's own type, and only
// fall back to the (expensive) pixel inspection in determineType().
Magick::ImageType Magick::Image::type(void) const
{
  if (constOptions()->type() != UndefinedType)
    return(constOptions()->type());
  else if (constImage()->type != UndefinedType)
    return(constImage()->type);
  else
    return(determineType());
}

void Magick::Image::verbose(const bool verboseFlag_)
{ modifyImage(); options()->verbose(verboseFlag_); }

bool Magick::Image::verbose(void) const
{ return(constOptions()->verbose()); }

void Magick::Image::view(const std::string &view_)
{ modifyImage(); options()->view(view_); }

std::string Magick::Image::view(void) const
{ return(constOptions()->view()); }

// Virtual pixel method (body continues on the next line).
void Magick::Image::virtualPixelMethod(
  const VirtualPixelMethod virtual_pixel_method_)
{
  modifyImage();
  // NOTE(review): continuation of virtualPixelMethod(): keep the core
  // image and the Options copy in sync.
  SetImageVirtualPixelMethod(image(),virtual_pixel_method_);
  options()->virtualPixelMethod(virtual_pixel_method_);
}

Magick::VirtualPixelMethod Magick::Image::virtualPixelMethod(void) const
{ return(GetImageVirtualPixelMethod(constImage())); }

void Magick::Image::x11Display(const std::string &display_)
{ modifyImage(); options()->x11Display(display_); }

std::string Magick::Image::x11Display(void) const
{ return(constOptions()->x11Display()); }

// Resolution getters read straight from the core image structure.
double Magick::Image::xResolution(void) const
{ return(constImage()->x_resolution); }

double Magick::Image::yResolution(void) const
{ return(constImage()->y_resolution); }

// Filter wrappers below follow one pattern: build a new MagickCore image,
// adopt it via replaceImage(), then convert any core exception into C++.
void Magick::Image::adaptiveBlur(const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AdaptiveBlurImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Resize honouring the full geometry grammar (percentages, offsets, ...).
void Magick::Image::adaptiveResize(const Geometry &geometry_)
{
  MagickCore::Image *newImage;
  size_t width=columns(), height=rows();
  ssize_t x=0, y=0;
  ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width,
    &height);
  GetPPException;
  newImage=AdaptiveResizeImage(constImage(),width,height,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::adaptiveSharpen(const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::adaptiveSharpenChannel(const ChannelType channel_,
  const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AdaptiveSharpenImageChannel(constImage(),channel_,radius_,sigma_,
    exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Local adaptive thresholding (call continues on the next line).
void Magick::Image::adaptiveThreshold(const size_t width_,const size_t height_,
  const ssize_t offset_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AdaptiveThresholdImage(constImage(),width_,height_,offset_,
    exceptionInfo);  // NOTE(review): tail of adaptiveThreshold()
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::addNoise(const NoiseType noiseType_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AddNoiseImage(constImage(),noiseType_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::addNoiseChannel(const ChannelType channel_,
  const NoiseType noiseType_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=AddNoiseImageChannel(constImage(),channel_,noiseType_,
    exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Apply the affine described by a DrawableAffine to the whole image.
void Magick::Image::affineTransform(const DrawableAffine &affine_ )
{
  AffineMatrix _affine;
  MagickCore::Image *newImage;
  _affine.sx = affine_.sx();
  _affine.sy = affine_.sy();
  _affine.rx = affine_.rx();
  _affine.ry = affine_.ry();
  _affine.tx = affine_.tx();
  _affine.ty = affine_.ty();
  GetPPException;
  newImage=AffineTransformImage(constImage(),&_affine,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::alphaChannel(AlphaChannelType alphaType_)
{
  modifyImage();
  SetImageAlphaChannel(image(), alphaType_);
  throwImageException();
}

// annotate() overloads funnel into the four-argument version below.
void Magick::Image::annotate(const std::string &text_,
  const Geometry &location_)
{ annotate(text_,location_,NorthWestGravity,0.0); }

void Magick::Image::annotate(const std::string &text_,
  const Geometry &boundingArea_,const GravityType gravity_)
{ annotate(text_,boundingArea_,gravity_,0.0); }

// Full annotate: borrowed pointers are installed into drawInfo for the
// duration of the call and restored before returning (continues below).
void Magick::Image::annotate(const std::string &text_,
  const Geometry &boundingArea_,const GravityType gravity_,
  const double degrees_)
{
  AffineMatrix oaffine;
  char boundingArea[MaxTextExtent];
  DrawInfo *drawInfo;
  modifyImage();
  drawInfo=options()->drawInfo();
  drawInfo->text=DestroyString(drawInfo->text);
  drawInfo->text=const_cast<char *>(text_.c_str());
  drawInfo->geometry=DestroyString(drawInfo->geometry);
  if (boundingArea_.isValid())
    {
      if (boundingArea_.width() == 0 || boundingArea_.height() == 0)
        {
          FormatLocaleString(boundingArea,MaxTextExtent,"%+.20g%+.20g",
            (double)
          // NOTE(review): continuation of annotate(text,area,gravity,deg).
          boundingArea_.xOff(),(double) boundingArea_.yOff());
        }
      else
        {
          (void) CopyMagickString(boundingArea,
            std::string(boundingArea_).c_str(), MaxTextExtent);
        }
      // drawInfo->geometry borrows this stack buffer; it is reset to NULL
      // below before this function returns.
      drawInfo->geometry=boundingArea;
    }
  drawInfo->gravity=gravity_;
  oaffine=drawInfo->affine;
  if (degrees_ != 0.0)
    {
      AffineMatrix affine, current;
      // Compose a rotation by degrees_ with the current affine matrix.
      affine.sx=1.0;
      affine.rx=0.0;
      affine.ry=0.0;
      affine.sy=1.0;
      affine.tx=0.0;
      affine.ty=0.0;
      current=drawInfo->affine;
      affine.sx=cos(DegreesToRadians(fmod(degrees_,360.0)));
      affine.rx=sin(DegreesToRadians(fmod(degrees_,360.0)));
      affine.ry=(-sin(DegreesToRadians(fmod(degrees_,360.0))));
      affine.sy=cos(DegreesToRadians(fmod(degrees_,360.0)));
      drawInfo->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
      drawInfo->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
      drawInfo->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
      drawInfo->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
      drawInfo->affine.tx=current.sx*affine.tx+current.ry*affine.ty
        +current.tx;
    }
  AnnotateImage(image(),drawInfo);
  // Restore original values
  drawInfo->affine=oaffine;
  drawInfo->text=(char *) NULL;
  drawInfo->geometry=(char *) NULL;
  throwImageException();
}

// Annotate using only a gravity; gravity is reset to its default after.
void Magick::Image::annotate(const std::string &text_,
  const GravityType gravity_)
{
  DrawInfo *drawInfo;
  modifyImage();
  drawInfo=options()->drawInfo();
  drawInfo->text=DestroyString(drawInfo->text);
  drawInfo->text=const_cast<char *>(text_.c_str());
  drawInfo->gravity=gravity_;
  AnnotateImage(image(),drawInfo);
  drawInfo->gravity=NorthWestGravity;
  drawInfo->text=(char *) NULL;
  throwImageException();
}

// Per-image artifacts (free-form key/value pairs used by MagickCore).
void Magick::Image::artifact(const std::string &name_,
  const std::string &value_)
{
  modifyImage();
  (void) SetImageArtifact(image(),name_.c_str(),value_.c_str());
}

std::string Magick::Image::artifact(const std::string &name_) const
{
  const char *value;
  value=GetImageArtifact(constImage(),name_.c_str());
  if (value)
    return(std::string(value));
  return(std::string());
}

// Image properties/attributes (continues on the next line).
void Magick::Image::attribute(const std::string name_,const char *value_)
{
  modifyImage();
  SetImageProperty(image(),name_.c_str(),value_);  // tail of attribute()
}

void Magick::Image::attribute(const std::string name_,const std::string value_)
{
  modifyImage();
  SetImageProperty(image(),name_.c_str(),value_.c_str());
}

std::string Magick::Image::attribute(const std::string name_) const
{
  const char *value;
  value=GetImageProperty(constImage(),name_.c_str());
  if (value)
    return(std::string(value));
  return(std::string()); // Intentionally no exception
}

// In-place auto gamma/level adjustments (whole image or one channel).
void Magick::Image::autoGamma(void)
{
  modifyImage();
  (void) AutoGammaImage(image());
  throwImageException();
}

void Magick::Image::autoGammaChannel(const ChannelType channel_)
{
  modifyImage();
  (void) AutoGammaImageChannel(image(),channel_);
  throwImageException();
}

void Magick::Image::autoLevel(void)
{
  modifyImage();
  (void) AutoLevelImage(image());
  throwImageException();
}

void Magick::Image::autoLevelChannel(const ChannelType channel_)
{
  modifyImage();
  (void) AutoLevelImageChannel(image(),channel_);
  throwImageException();
}

// Rotate/flip pixels so the orientation tag becomes TopLeft; no-op when
// the orientation is already normal or unknown.
void Magick::Image::autoOrient(void)
{
  MagickCore::Image *newImage;
  if (image()->orientation == UndefinedOrientation ||
      image()->orientation == TopLeftOrientation)
    return;
  GetPPException;
  newImage=AutoOrientImage(constImage(),image()->orientation,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::blackThreshold(const std::string &threshold_)
{
  modifyImage();
  BlackThresholdImage(image(),threshold_.c_str());
  throwImageException();
}

void Magick::Image::blackThresholdChannel(const ChannelType channel_,
  const std::string &threshold_)
{
  modifyImage();
  GetPPException;
  BlackThresholdImageChannel(image(),channel_,threshold_.c_str(),
    exceptionInfo);
  ThrowImageException;
}

void Magick::Image::blueShift(const double factor_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=BlueShiftImage(constImage(),factor_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Blur image
void Magick::Image::blur(const double radius_, const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;  // NOTE(review): tail of blur()
  newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::blurChannel(const ChannelType channel_,
  const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=BlurImageChannel(constImage(),channel_,radius_,sigma_,
    exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Surround the image with a border of the current borderColor.
void Magick::Image::border(const Geometry &geometry_)
{
  MagickCore::Image *newImage;
  RectangleInfo borderInfo=geometry_;
  GetPPException;
  newImage=BorderImage(constImage(),&borderInfo,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::brightnessContrast(const double brightness_,
  const double contrast_)
{
  modifyImage();
  BrightnessContrastImage(image(),brightness_,contrast_);
  throwImageException();
}

void Magick::Image::brightnessContrastChannel(const ChannelType channel_,
  const double brightness_,const double contrast_)
{
  modifyImage();
  BrightnessContrastImageChannel(image(),channel_,brightness_,contrast_);
  throwImageException();
}

void Magick::Image::cannyEdge(const double radius_,const double sigma_,
  const double lowerPercent_,const double upperPercent_)
{
  MagickCore::Image *newImage;
  modifyImage();
  GetPPException;
  newImage=CannyEdgeImage(constImage(),radius_,sigma_,lowerPercent_,
    upperPercent_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Apply an ASC Color Decision List (CDL) specification in place.
void Magick::Image::cdl(const std::string &cdl_)
{
  modifyImage();
  (void) ColorDecisionListImage(image(),cdl_.c_str());
  throwImageException();
}

// Reduce the image to a single channel, in place.
void Magick::Image::channel(const ChannelType channel_)
{
  modifyImage();
  SeparateImageChannel(image(),channel_);
  throwImageException();
}

void Magick::Image::channelDepth(const ChannelType channel_,
  const size_t depth_)
{
  modifyImage();
  SetImageChannelDepth(image(),channel_,depth_);
  throwImageException();
}

size_t Magick::Image::channelDepth(const ChannelType channel_)
{
  size_t channel_depth;
  GetPPException;
  // NOTE(review): tail of the channelDepth(channel) getter.
  channel_depth=GetImageChannelDepth(constImage(), channel_,exceptionInfo);
  ThrowImageException;
  return channel_depth;
}

void Magick::Image::charcoal(const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=CharcoalImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Remove a region and collapse the remaining pixels together.
void Magick::Image::chop(const Geometry &geometry_)
{
  MagickCore::Image *newImage;
  RectangleInfo chopInfo=geometry_;
  GetPPException;
  newImage=ChopImage(constImage(),&chopInfo,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Chromaticity primaries / white point, stored on the core image.
void Magick::Image::chromaBluePrimary(const double x_,const double y_)
{
  modifyImage();
  image()->chromaticity.blue_primary.x=x_;
  image()->chromaticity.blue_primary.y=y_;
}

void Magick::Image::chromaBluePrimary(double *x_,double *y_) const
{
  *x_=constImage()->chromaticity.blue_primary.x;
  *y_=constImage()->chromaticity.blue_primary.y;
}

void Magick::Image::chromaGreenPrimary(const double x_,const double y_)
{
  modifyImage();
  image()->chromaticity.green_primary.x=x_;
  image()->chromaticity.green_primary.y=y_;
}

void Magick::Image::chromaGreenPrimary(double *x_,double *y_) const
{
  *x_=constImage()->chromaticity.green_primary.x;
  *y_=constImage()->chromaticity.green_primary.y;
}

void Magick::Image::chromaRedPrimary(const double x_,const double y_)
{
  modifyImage();
  image()->chromaticity.red_primary.x=x_;
  image()->chromaticity.red_primary.y=y_;
}

void Magick::Image::chromaRedPrimary(double *x_,double *y_) const
{
  *x_=constImage()->chromaticity.red_primary.x;
  *y_=constImage()->chromaticity.red_primary.y;
}

void Magick::Image::chromaWhitePoint(const double x_,const double y_)
{
  modifyImage();
  image()->chromaticity.white_point.x=x_;
  image()->chromaticity.white_point.y=y_;
}

void Magick::Image::chromaWhitePoint(double *x_,double *y_) const
{
  *x_=constImage()->chromaticity.white_point.x;
  *y_=constImage()->chromaticity.white_point.y;
}

// Clamp pixel values to the valid quantum range (continues next line).
void Magick::Image::clamp(void)
{
  modifyImage();
ClampImage(image()); throwImageException(); } void Magick::Image::clampChannel(const ChannelType channel_) { modifyImage(); ClampImageChannel(image(),channel_); throwImageException(); } void Magick::Image::clip(void ) { modifyImage(); ClipImage(image()); throwImageException(); } void Magick::Image::clipPath(const std::string pathname_,const bool inside_) { modifyImage(); ClipImagePath(image(),pathname_.c_str(),(MagickBooleanType) inside_); throwImageException(); } void Magick::Image::clut(const Image &clutImage_) { modifyImage(); ClutImage(image(),clutImage_.constImage()); throwImageException(); } void Magick::Image::clutChannel(const ChannelType channel_, const Image &clutImage_) { modifyImage(); ClutImageChannel(image(),channel_,clutImage_.constImage()); throwImageException(); } void Magick::Image::colorize(const unsigned int opacityRed_, const unsigned int opacityGreen_,const unsigned int opacityBlue_, const Color &penColor_) { char opacity[MaxTextExtent]; MagickCore::Image *newImage; if (!penColor_.isValid()) throwExceptionExplicit( OptionError, "Pen color argument is invalid" ); FormatLocaleString(opacity,MaxTextExtent,"%u/%u/%u",opacityRed_, opacityGreen_,opacityBlue_); GetPPException; newImage=ColorizeImage(image(),opacity,penColor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::colorize(const unsigned int opacity_, const Color &penColor_) { colorize(opacity_,opacity_,opacity_,penColor_); } void Magick::Image::colorMap(const size_t index_,const Color &color_) { if (index_ > (MaxColormapSize-1) ) throwExceptionExplicit(OptionError, "Colormap index must be less than MaxColormapSize"); if (!color_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); modifyImage(); // Ensure that colormap size is large enough if (colorMapSize() < (index_+1)) colorMapSize(index_+1); // Set color at index in colormap (image()->colormap)[index_]=color_; } Magick::Color Magick::Image::colorMap(const size_t index_) const { if 
(!constImage()->colormap) { throwExceptionExplicit(OptionError,"Image does not contain a colormap"); return(Color()); } if (index_ > constImage()->colors-1) throwExceptionExplicit(OptionError,"Index out of range"); return(Color((constImage()->colormap)[index_])); } void Magick::Image::colorMatrix(const size_t order_, const double *color_matrix_) { KernelInfo *kernel_info; MagickCore::Image *newImage; GetPPException; kernel_info=AcquireKernelInfo("1"); if (kernel_info != (KernelInfo *) NULL) { kernel_info->width=order_; kernel_info->height=order_; kernel_info->values=(double *) color_matrix_; newImage=ColorMatrixImage(constImage(),kernel_info,exceptionInfo); kernel_info->values=(double *) NULL; kernel_info=DestroyKernelInfo(kernel_info); replaceImage(newImage); ThrowImageException; } } bool Magick::Image::compare(const Image &reference_) { bool status; Image ref=reference_; modifyImage(); ref.modifyImage(); status=static_cast<bool>(IsImagesEqual(image(),ref.constImage())); throwImageException(); return(status); } double Magick::Image::compare(const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageDistortion(image(),reference_.constImage(),metric_,&distortion, exceptionInfo); ThrowImageException; return(distortion); } double Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageChannelDistortion(image(),reference_.constImage(),channel_,metric_, &distortion,exceptionInfo); ThrowImageException; return(distortion); } Magick::Image Magick::Image::compare(const Image &reference_, const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; newImage=CompareImages(image(),reference_.constImage(),metric_,distortion, exceptionInfo); ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } Magick::Image 
// NOTE(review): signature continues from the previous line's return type.
Magick::Image::compareChannel(const ChannelType channel_,
  const Image &reference_,const MetricType metric_,double *distortion)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=CompareImageChannels(image(),reference_.constImage(),channel_,
    metric_,distortion,exceptionInfo);
  ThrowImageException;
  if (newImage == (MagickCore::Image *) NULL)
    return(Magick::Image());
  else
    return(Magick::Image(newImage));
}

// Composite at an offset given in geometry syntax (supports percentages).
void Magick::Image::composite(const Image &compositeImage_,
  const Geometry &offset_,const CompositeOperator compose_)
{
  size_t height=rows(), width=columns();
  ssize_t x=offset_.xOff(), y=offset_.yOff();
  modifyImage();
  ParseMetaGeometry(static_cast<std::string>(offset_).c_str(),&x,&y,&width,
    &height);
  CompositeImage(image(),compose_,compositeImage_.constImage(),x,y);
  throwImageException();
}

// Composite positioned by gravity.
void Magick::Image::composite(const Image &compositeImage_,
  const GravityType gravity_,const CompositeOperator compose_)
{
  RectangleInfo geometry;
  modifyImage();
  SetGeometry(compositeImage_.constImage(),&geometry);
  GravityAdjustGeometry(columns(),rows(),gravity_,&geometry);
  CompositeImage(image(),compose_,compositeImage_.constImage(),geometry.x,
    geometry.y);
  throwImageException();
}

// Composite at explicit pixel coordinates (continues on the next line).
void Magick::Image::composite(const Image &compositeImage_,
  const ssize_t xOffset_,const ssize_t yOffset_,
  const CompositeOperator compose_)
{
  // Image supplied as compositeImage is composited with current image and
  // results in updating current image.
  modifyImage();  // NOTE(review): tail of composite(x,y)
  CompositeImage(image(),compose_,compositeImage_.constImage(),xOffset_,
    yOffset_);
  throwImageException();
}

void Magick::Image::connectedComponents(const size_t connectivity_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=ConnectedComponentsImage(constImage(),connectivity_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Increase (sharpen_ non-zero) or reduce the image contrast.
void Magick::Image::contrast(const size_t sharpen_)
{
  modifyImage();
  ContrastImage(image(),(MagickBooleanType) sharpen_);
  throwImageException();
}

void Magick::Image::contrastStretch(const double black_point_,
  const double white_point_)
{
  modifyImage();
  ContrastStretchImageChannel(image(),DefaultChannels,black_point_,
    white_point_);
  throwImageException();
}

void Magick::Image::contrastStretchChannel(const ChannelType channel_,
  const double black_point_,const double white_point_)
{
  modifyImage();
  ContrastStretchImageChannel(image(),channel_,black_point_,white_point_);
  throwImageException();
}

// Convolve with an order_ x order_ kernel.
void Magick::Image::convolve(const size_t order_,const double *kernel_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=ConvolveImage(constImage(),order_,kernel_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Copy a rectangle of pixels from source_ into this image at offset_.
void Magick::Image::copyPixels(const Image &source_,const Geometry &geometry_,
  const Offset &offset_)
{
  const OffsetInfo offset=offset_;
  const RectangleInfo geometry=geometry_;
  GetPPException;
  (void) CopyImagePixels(image(),source_.constImage(),&geometry,&offset,
    exceptionInfo);
  ThrowImageException;
}

void Magick::Image::crop(const Geometry &geometry_)
{
  MagickCore::Image *newImage;
  RectangleInfo cropInfo=geometry_;
  GetPPException;
  newImage=CropImage(constImage(),&cropInfo,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::cycleColormap(const ssize_t amount_)
{
  modifyImage();
  CycleColormapImage(image(),amount_);
  throwImageException();
}

// Decrypt pixels previously enciphered with the same passphrase.
void Magick::Image::decipher(const std::string &passphrase_)
{
  modifyImage();
  GetPPException;
  // NOTE(review): tail of decipher().
  DecipherImage(image(),passphrase_.c_str(),exceptionInfo);
  ThrowImageException;
}

// Add or remove a boolean "magick:key" image option.
void Magick::Image::defineSet(const std::string &magick_,
  const std::string &key_,bool flag_)
{
  std::string definition;
  modifyImage();
  definition=magick_ + ":" + key_;
  if (flag_)
    (void) SetImageOption(imageInfo(),definition.c_str(),"");
  else
    DeleteImageOption(imageInfo(),definition.c_str());
}

bool Magick::Image::defineSet(const std::string &magick_,
  const std::string &key_) const
{
  const char *option;
  std::string key;
  key=magick_ + ":" + key_;
  option=GetImageOption(constImageInfo(),key.c_str());
  if (option)
    return(true);
  return(false);
}

// Set or read a string-valued "magick:key" image option.
void Magick::Image::defineValue(const std::string &magick_,
  const std::string &key_,const std::string &value_)
{
  std::string format;
  modifyImage();
  format=magick_ + ":" + key_;
  (void) SetImageOption(imageInfo(),format.c_str(),value_.c_str());
}

std::string Magick::Image::defineValue(const std::string &magick_,
  const std::string &key_) const
{
  const char *option;
  std::string definition;
  definition=magick_ + ":" + key_;
  option=GetImageOption(constImageInfo(),definition.c_str());
  if (option)
    return(std::string(option));
  return(std::string());
}

// Straighten a scanned image whose skew exceeds threshold_.
void Magick::Image::deskew(const double threshold_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=DeskewImage(constImage(),threshold_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::despeckle(void)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=DespeckleImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Inspect the pixels to classify the image type.
Magick::ImageType Magick::Image::determineType(void) const
{
  ImageType image_type;
  GetPPException;
  image_type=GetImageType(constImage(),exceptionInfo);
  ThrowImageException;
  return(image_type);
}

void Magick::Image::display(void)
{
  DisplayImages(imageInfo(),image());
}

// Distort per the given method/arguments (continues on the next line).
void Magick::Image::distort(const DistortImageMethod method_,
  const size_t number_arguments_,const double *arguments_,const bool bestfit_)
{
  MagickCore::Image
    *newImage;  // NOTE(review): tail of distort()
  GetPPException;
  newImage=DistortImage(constImage(),method_,number_arguments_,arguments_,
    bestfit_ == true ? MagickTrue : MagickFalse,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Render a single drawable object onto the image.
void Magick::Image::draw(const Magick::Drawable &drawable_)
{
  DrawingWand *wand;
  modifyImage();
  wand=AcquireDrawingWand(options()->drawInfo(),image());
  if(wand)
    {
      drawable_.operator()(wand);
      // Only rasterize when the drawable itself raised no exception.
      if (constImage()->exception.severity == UndefinedException)
        DrawRender(wand);
      wand=DestroyDrawingWand(wand);
    }
  throwImageException();
}

// Render a list of drawables in order; stop early on the first error.
void Magick::Image::draw(const std::list<Magick::Drawable> &drawable_)
{
  DrawingWand *wand;
  modifyImage();
  wand=AcquireDrawingWand(options()->drawInfo(),image());
  if(wand)
    {
      for (std::list<Magick::Drawable>::const_iterator p = drawable_.begin();
           p != drawable_.end(); p++)
        {
          p->operator()(wand);
          if (constImage()->exception.severity != UndefinedException)
            break;
        }
      if (constImage()->exception.severity == UndefinedException)
        DrawRender(wand);
      wand=DestroyDrawingWand(wand);
    }
  throwImageException();
}

void Magick::Image::edge(const double radius_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=EdgeImage(constImage(),radius_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::emboss(const double radius_,const double sigma_)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=EmbossImage(constImage(),radius_,sigma_,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Encrypt pixels; reversible via decipher() with the same passphrase.
void Magick::Image::encipher(const std::string &passphrase_)
{
  modifyImage();
  GetPPException;
  EncipherImage(image(),passphrase_.c_str(),exceptionInfo);
  ThrowImageException;
}

void Magick::Image::enhance(void)
{
  MagickCore::Image *newImage;
  GetPPException;
  newImage=EnhanceImage(constImage(),exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

void Magick::Image::equalize(void)
{
  modifyImage();
  EqualizeImage(image());
  throwImageException();
}

// Fill with the background color (continues on the next line).
void Magick::Image::erase(void)
{
  modifyImage();
  (void)
SetImageBackgroundColor(image()); throwImageException(); } void Magick::Image::extent(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo extentInfo; modifyImage(); GetPPException; extentInfo=geometry_; extentInfo.x=geometry_.xOff(); extentInfo.y=geometry_.yOff(); newImage=ExtentImage(constImage(),&extentInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); extent(geometry_); } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { image()->gravity=gravity_; backgroundColor(backgroundColor_); extent(geometry_,gravity_); } void Magick::Image::extent(const Geometry &geometry_, const GravityType gravity_) { RectangleInfo geometry; SetGeometry(image(),&geometry); geometry.width=geometry_.width(); geometry.height=geometry_.height(); GravityAdjustGeometry(image()->columns,image()->rows,gravity_,&geometry); extent(geometry); } void Magick::Image::flip(void) { MagickCore::Image *newImage; GetPPException; newImage=FlipImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,false); } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_,const bool invert_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,invert_); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_) { floodFillColor(x_,y_,fillColor_,false); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const bool invert_) { PixelPacket pixel; modifyImage(); pixel=pixelColor(x_,y_); floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_); } void Magick::Image::floodFillColor(const 
  // NOTE(review): continuation — border-matching floodFillColor overloads.
  Geometry &point_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_)
{
  floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,false);
}

void Magick::Image::floodFillColor(const Geometry &point_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_,
  const bool invert_)
{
  floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,invert_);
}

void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_)
{
  floodFillColor(x_,y_,fillColor_,borderColor_,false);
}

void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_,
  const Magick::Color &fillColor_,const Magick::Color &borderColor_,
  const bool invert_)
{
  PixelPacket pixel;

  modifyImage();
  pixel=static_cast<PixelPacket>(borderColor_);
  floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_);
}

// Flood-fill the opacity channel starting from the pixel at (x,y).
void Magick::Image::floodFillOpacity(const ssize_t x_,const ssize_t y_,
  const unsigned int opacity_,const bool invert_)
{
  MagickPixelPacket target;
  PixelPacket pixel;

  modifyImage();
  GetMagickPixelPacket(constImage(),&target);
  pixel=static_cast<PixelPacket>(pixelColor(x_,y_));
  target.red=pixel.red;
  target.green=pixel.green;
  target.blue=pixel.blue;
  target.opacity=opacity_;
  (void) FloodfillPaintImage(image(),OpacityChannel,options()->drawInfo(),
    &target,x_,y_,(MagickBooleanType)invert_);
  throwImageException();
}

// PaintMethod variant: FillToBorderMethod maps to invert (continues).
void Magick::Image::floodFillOpacity(const ssize_t x_,const ssize_t y_,
  const unsigned int opacity_,const PaintMethod method_)
{
  floodFillOpacity(x_,y_,opacity_,method_ == FloodfillMethod ?
    false : true);  // NOTE(review): tail of the PaintMethod overload
}

void Magick::Image::floodFillOpacity(const ::ssize_t x_,const ::ssize_t y_,
  const unsigned int opacity_,const Color &target_,const bool invert_)
{
  MagickPixelPacket target;
  PixelPacket pixel;

  modifyImage();
  GetMagickPixelPacket(constImage(),&target);
  pixel=static_cast<PixelPacket>(target_);
  target.red=pixel.red;
  target.green=pixel.green;
  target.blue=pixel.blue;
  target.opacity=opacity_;
  (void) FloodfillPaintImage(image(),OpacityChannel,options()->drawInfo(),
    &target,x_,y_,(MagickBooleanType)invert_);
  throwImageException();
}

// floodFillTexture overloads: tile texture_ over the matched region.
void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,false);
}

void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_,const bool invert_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,invert_);
}

void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_,
  const Magick::Image &texture_)
{
  floodFillTexture(x_,y_,texture_,false);
}

void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_,
  const Magick::Image &texture_,const bool invert_)
{
  PixelPacket pixel;

  modifyImage();
  pixel=static_cast<PixelPacket>(pixelColor(x_,y_));
  floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_);
}

void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_,const Magick::Color &borderColor_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,false);
}

void Magick::Image::floodFillTexture(const Magick::Geometry &point_,
  const Magick::Image &texture_,const Magick::Color &borderColor_,
  const bool invert_)
{
  floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,invert_);
}

void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_,
  const Magick::Image &texture_,const Magick::Color &borderColor_)
{
  floodFillTexture(x_,y_,texture_,borderColor_,false);
}

// (Signature continues on the next line.)
void
Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const Magick::Color &borderColor_, const bool invert_) { PixelPacket pixel; modifyImage(); pixel=static_cast<PixelPacket>(borderColor_); floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_); } void Magick::Image::flop(void) { MagickCore::Image *newImage; GetPPException; newImage=FlopImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fontTypeMetrics(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); if (GetTypeMetrics(image(),drawInfo,&(metrics->_typeMetric)) == MagickFalse) throwImageException(); drawInfo->text=0; } void Magick::Image::fontTypeMetricsMultiline(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); GetMultilineTypeMetrics(image(),drawInfo,&(metrics->_typeMetric)); drawInfo->text=0; } void Magick::Image::frame(const Geometry &geometry_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(geometry_.width()); info.y=static_cast<ssize_t>(geometry_.height()); info.width=columns() + ( static_cast<size_t>(info.x) << 1 ); info.height=rows() + ( static_cast<size_t>(info.y) << 1 ); info.outer_bevel=geometry_.xOff(); info.inner_bevel=geometry_.yOff(); GetPPException; newImage=FrameImage(constImage(),&info,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::frame(const size_t width_,const size_t height_, const ssize_t innerBevel_,const ssize_t outerBevel_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(width_); info.y=static_cast<ssize_t>(height_); info.width=columns() + ( static_cast<size_t>(info.x) << 1 ); info.height=rows() + ( static_cast<size_t>(info.y) << 1 ); info.outer_bevel=static_cast<ssize_t>(outerBevel_); 
info.inner_bevel=static_cast<ssize_t>(innerBevel_); GetPPException; newImage=FrameImage(constImage(),&info,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression) { MagickCore::Image *newImage; GetPPException; newImage=FxImageChannel(constImage(),DefaultChannels,expression.c_str(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression, const Magick::ChannelType channel) { MagickCore::Image *newImage; GetPPException; newImage=FxImageChannel(constImage(),channel,expression.c_str(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::gamma(const double gamma_) { char gamma[MaxTextExtent + 1]; FormatLocaleString(gamma,MaxTextExtent,"%3.6f",gamma_); modifyImage(); GammaImage(image(),gamma); } void Magick::Image::gamma(const double gammaRed_,const double gammaGreen_, const double gammaBlue_) { char gamma[MaxTextExtent + 1]; FormatLocaleString(gamma,MaxTextExtent,"%3.6f/%3.6f/%3.6f/",gammaRed_, gammaGreen_,gammaBlue_); modifyImage(); GammaImage(image(),gamma); throwImageException(); } void Magick::Image::gaussianBlur(const double width_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=GaussianBlurImage(constImage(),width_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::gaussianBlurChannel(const ChannelType channel_, const double width_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=GaussianBlurImageChannel(constImage(),channel_,width_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } const Magick::IndexPacket* Magick::Image::getConstIndexes(void) const { const Magick::IndexPacket *result; result=GetVirtualIndexQueue(constImage()); if (!result) throwImageException(); return(result); } const Magick::PixelPacket* Magick::Image::getConstPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const 
size_t rows_) const { const PixelPacket *result; GetPPException; result=GetVirtualPixels(constImage(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } Magick::IndexPacket *Magick::Image::getIndexes(void) { Magick::IndexPacket *result; result=GetAuthenticIndexQueue(image()); if(!result) throwImageException(); return(result); } Magick::PixelPacket *Magick::Image::getPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_) { PixelPacket *result; modifyImage(); GetPPException; result=GetAuthenticPixels(image(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::grayscale(const PixelIntensityMethod method_) { modifyImage(); (void) GrayscaleImage(image(),method_); throwImageException(); } void Magick::Image::haldClut(const Image &clutImage_) { modifyImage(); (void) HaldClutImage(image(),clutImage_.constImage()); throwImageException(); } void Magick::Image::houghLine(const size_t width_,const size_t height_, const size_t threshold_) { MagickCore::Image *newImage; GetPPException; newImage=HoughLineImage(constImage(),width_,height_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::implode(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=ImplodeImage(constImage(),factor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::inverseFourierTransform(const Image &phase_) { inverseFourierTransform(phase_,true); } void Magick::Image::inverseFourierTransform(const Image &phase_, const bool magnitude_) { MagickCore::Image *newImage; GetPPException; newImage=InverseFourierTransformImage(constImage(),phase_.constImage(), magnitude_ == true ? 
MagickTrue : MagickFalse,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwahara(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwaharaChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=KuwaharaImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::level(const double black_point,const double white_point, const double gamma) { char levels[MaxTextExtent]; modifyImage(); FormatLocaleString(levels,MaxTextExtent,"%g,%g,%g",black_point,white_point, gamma); (void) LevelImage(image(),levels); throwImageException(); } void Magick::Image::levelChannel(const Magick::ChannelType channel, const double black_point,const double white_point,const double gamma) { modifyImage(); (void) LevelImageChannel(image(),channel,black_point,white_point,gamma); throwImageException(); } void Magick::Image::levelColors(const Color &blackColor_, const Color &whiteColor_,const bool invert_) { MagickPixelPacket black, white; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(image(),&black); pixel=static_cast<PixelPacket>(blackColor_); black.red=pixel.red; black.green=pixel.green; black.blue=pixel.blue; black.opacity=pixel.opacity; GetMagickPixelPacket(image(),&white); pixel=static_cast<PixelPacket>(whiteColor_); white.red=pixel.red; white.green=pixel.green; white.blue=pixel.blue; white.opacity=pixel.opacity; (void) LevelColorsImage(image(),&black,&white, invert_ == true ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::levelColorsChannel(const ChannelType channel_, const Color &blackColor_,const Color &whiteColor_,const bool invert_) { MagickPixelPacket black, white; PixelPacket pixel; modifyImage(); GetMagickPixelPacket(image(),&black); pixel=static_cast<PixelPacket>(blackColor_); black.red=pixel.red; black.green=pixel.green; black.blue=pixel.blue; black.opacity=pixel.opacity; GetMagickPixelPacket(image(),&white); pixel=static_cast<PixelPacket>(whiteColor_); white.red=pixel.red; white.green=pixel.green; white.blue=pixel.blue; white.opacity=pixel.opacity; (void) LevelColorsImageChannel(image(),channel_,&black,&white, invert_ == true ? MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::levelize(const double blackPoint_,const double whitePoint_, const double gamma_) { modifyImage(); (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_); throwImageException(); } void Magick::Image::levelizeChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_,const double gamma_) { modifyImage(); (void) LevelizeImageChannel(image(),channel_,blackPoint_,whitePoint_,gamma_); throwImageException(); } void Magick::Image::linearStretch(const double blackPoint_, const double whitePoint_) { modifyImage(); LinearStretchImage(image(),blackPoint_,whitePoint_); throwImageException(); } void Magick::Image::liquidRescale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=LiquidRescaleImage(constImage(),width,height,x,y,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::localContrast(const double radius_,const double strength_) { MagickCore::Image *newImage; GetPPException; newImage=LocalContrastImage(constImage(),radius_,strength_,exceptionInfo); replaceImage(newImage); 
ThrowImageException; } void Magick::Image::magnify(void) { MagickCore::Image *newImage; GetPPException; newImage=MagnifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::map(const Image &mapImage_,const bool dither_) { modifyImage(); options()->quantizeDither(dither_); RemapImage(options()->quantizeInfo(),image(),mapImage_.constImage()); throwImageException(); } void Magick::Image::matteFloodfill(const Color &target_, const unsigned int opacity_,const ssize_t x_,const ssize_t y_, const Magick::PaintMethod method_) { floodFillOpacity(x_,y_,opacity_,target_, method_ == FloodfillMethod ? false : true); } void Magick::Image::medianFilter(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),MedianStatistic,(size_t) radius_, (size_t) radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::mergeLayers(const ImageLayerMethod layerMethod_) { MagickCore::Image *newImage; GetPPException; newImage=MergeImageLayers(image(),layerMethod_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::minify(void) { MagickCore::Image *newImage; GetPPException; newImage=MinifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::modulate(const double brightness_,const double saturation_, const double hue_) { char modulate[MaxTextExtent + 1]; FormatLocaleString(modulate,MaxTextExtent,"%3.6f,%3.6f,%3.6f",brightness_, saturation_,hue_); modifyImage(); ModulateImage(image(),modulate); throwImageException(); } Magick::ImageMoments Magick::Image::moments(void) const { return(ImageMoments(*this)); } void Magick::Image::morphology(const MorphologyMethod method_, const std::string kernel_,const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; kernel=AcquireKernelInfo(kernel_.c_str()); if (kernel == (KernelInfo *)NULL) throwExceptionExplicit(OptionError,"Unable to 
parse kernel."); GetPPException; newImage=MorphologyImage(constImage(),method_,iterations_,kernel, exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void Magick::Image::morphology(const MorphologyMethod method_, const KernelInfoType kernel_,const std::string arguments_, const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(OptionError,"Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphology(method_,kernel,iterations_); } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const std::string kernel_, const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; kernel=AcquireKernelInfo(kernel_.c_str()); if (kernel == (KernelInfo *)NULL) { throwExceptionExplicit(OptionError,"Unable to parse kernel."); return; } GetPPException; newImage=MorphologyImageChannel(constImage(),channel_,method_,iterations_, kernel,exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const KernelInfoType kernel_, const std::string arguments_,const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(OptionError,"Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphologyChannel(channel_,method_,kernel,iterations_); } void Magick::Image::motionBlur(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=MotionBlurImage(constImage(),radius_,sigma_,angle_,exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::negate(const bool grayscale_) { modifyImage(); NegateImage(image(),(MagickBooleanType) grayscale_); throwImageException(); } void Magick::Image::negateChannel(const ChannelType channel_, const bool grayscale_) { modifyImage(); NegateImageChannel(image(),channel_,(MagickBooleanType) grayscale_); throwImageException(); } void Magick::Image::normalize(void) { modifyImage(); NormalizeImage(image()); throwImageException(); } void Magick::Image::oilPaint(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=OilPaintImage(constImage(),radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::opacity(const unsigned int opacity_) { modifyImage(); SetImageOpacity(image(),opacity_); } void Magick::Image::opaque(const Color &opaqueColor_,const Color &penColor_, const bool invert_) { MagickPixelPacket opaque, pen; std::string opaqueColor, penColor; if (!opaqueColor_.isValid()) throwExceptionExplicit(OptionError,"Opaque color argument is invalid"); if (!penColor_.isValid()) throwExceptionExplicit(OptionError,"Pen color argument is invalid"); opaqueColor=opaqueColor_; penColor=penColor_; (void) QueryMagickColor(opaqueColor.c_str(),&opaque,&image()->exception); (void) QueryMagickColor(penColor.c_str(),&pen,&image()->exception); modifyImage(); OpaquePaintImage(image(),&opaque,&pen,invert_ ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::orderedDither(std::string thresholdMap_) { modifyImage(); GetPPException; (void) OrderedPosterizeImage(image(),thresholdMap_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::orderedDitherChannel(const ChannelType channel_, std::string thresholdMap_) { modifyImage(); GetPPException; (void) OrderedPosterizeImageChannel(image(),channel_,thresholdMap_.c_str(), exceptionInfo); ThrowImageException; } void Magick::Image::perceptible(const double epsilon_) { modifyImage(); PerceptibleImage(image(),epsilon_); throwImageException(); } void Magick::Image::perceptibleChannel(const ChannelType channel_, const double epsilon_) { modifyImage(); PerceptibleImageChannel(image(),channel_,epsilon_); throwImageException(); } void Magick::Image::ping(const Blob& blob_) { MagickCore::Image *newImage; GetPPException; newImage=PingBlob(imageInfo(),blob_.data(),blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::ping(const std::string &imageSpec_) { MagickCore::Image *newImage; GetPPException; options()->fileName(imageSpec_); newImage=PingImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::pixelColor(const ssize_t x_,const ssize_t y_, const Color &color_) { // Test arguments to ensure they are within the image. 
if (y_ > (ssize_t) rows() || x_ > (ssize_t) columns()) throwExceptionExplicit(OptionError,"Access outside of image boundary"); modifyImage(); // Set image to DirectClass classType(DirectClass); // Get pixel view Pixels pixels(*this); // Set pixel value *(pixels.get(x_,y_,1,1))=color_; // Tell ImageMagick that pixels have been updated pixels.sync(); } Magick::Color Magick::Image::pixelColor(const ssize_t x_, const ssize_t y_) const { ClassType storage_class; storage_class=classType(); if (storage_class == DirectClass) { const PixelPacket *pixel; pixel=getConstPixels(x_,y_,1,1); if (pixel) return(Color(*pixel)); } else if (storage_class == PseudoClass) { const IndexPacket *indexes; indexes=getConstIndexes(); if(indexes) return(colorMap((size_t) *indexes)); } return(Color()); // invalid } void Magick::Image::polaroid(const std::string &caption_,const double angle_) { MagickCore::Image *newImage; GetPPException; (void) SetImageProperty(image(),"Caption",caption_.c_str()); newImage=PolaroidImage(constImage(),options()->drawInfo(),angle_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::posterize(const size_t levels_,const bool dither_) { modifyImage(); PosterizeImage(image(),levels_,(MagickBooleanType) dither_); throwImageException(); } void Magick::Image::posterizeChannel(const ChannelType channel_, const size_t levels_,const bool dither_) { modifyImage(); PosterizeImageChannel(image(),channel_,levels_, (MagickBooleanType) dither_); throwImageException(); } void Magick::Image::process(std::string name_,const ssize_t argc, const char **argv) { size_t status; modifyImage(); status=InvokeDynamicImageFilter(name_.c_str(),&image(),argc, argv, &image()->exception); if (status == false) throwImageException(); } void Magick::Image::profile(const std::string name_, const Magick::Blob &profile_) { ssize_t result; modifyImage(); result=ProfileImage(image(),name_.c_str(),(unsigned char *)profile_.data(), profile_.length(),MagickTrue); if (!result) 
throwImageException(); } Magick::Blob Magick::Image::profile(const std::string name_) const { const StringInfo *profile; profile=GetImageProfile(constImage(),name_.c_str()); if (profile == (StringInfo *) NULL) return(Blob()); return(Blob((void*) GetStringInfoDatum(profile),GetStringInfoLength( profile))); } void Magick::Image::quantize(const bool measureError_) { modifyImage(); if (measureError_) options()->quantizeInfo()->measure_error=MagickTrue; else options()->quantizeInfo()->measure_error=MagickFalse; QuantizeImage(options()->quantizeInfo(),image()); throwImageException(); } void Magick::Image::quantumOperator(const ChannelType channel_, const MagickEvaluateOperator operator_,double rvalue_) { GetPPException; EvaluateImageChannel(image(),channel_,operator_,rvalue_,exceptionInfo); ThrowImageException; } void Magick::Image::quantumOperator(const ChannelType channel_, const MagickFunction function_,const size_t number_parameters_, const double *parameters_) { GetPPException; FunctionImageChannel(image(),channel_,function_,number_parameters_, parameters_,exceptionInfo); ThrowImageException; } void Magick::Image::quantumOperator(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const ChannelType channel_, const MagickEvaluateOperator operator_,const double rvalue_) { MagickCore::Image *cropImage; RectangleInfo geometry; GetPPException; geometry.width=columns_; geometry.height=rows_; geometry.x=x_; geometry.y=y_; cropImage=CropImage(image(),&geometry,exceptionInfo); EvaluateImageChannel(cropImage,channel_,operator_,rvalue_,exceptionInfo); (void) CompositeImage(image(),image()->matte != MagickFalse ? OverCompositeOp : CopyCompositeOp,cropImage,geometry.x, geometry.y); cropImage=DestroyImageList(cropImage); ThrowImageException; } void Magick::Image::raise(const Geometry &geometry_,const bool raisedFlag_) { RectangleInfo raiseInfo; raiseInfo=geometry_; modifyImage(); RaiseImage(image(),&raiseInfo,raisedFlag_ == true ? 
MagickTrue : MagickFalse); throwImageException(); } void Magick::Image::randomThreshold( const Geometry &thresholds_ ) { GetPPException; modifyImage(); (void) RandomThresholdImage(image(),static_cast<std::string>( thresholds_).c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::randomThresholdChannel(const Geometry &thresholds_, const ChannelType channel_) { GetPPException; modifyImage(); (void) RandomThresholdImageChannel(image(),channel_,static_cast<std::string>( thresholds_).c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::read(const Blob &blob_) { MagickCore::Image *newImage; GetPPException; newImage=BlobToImage(imageInfo(),static_cast<const void *>(blob_.data()), blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::read(const Blob &blob_,const Geometry &size_) { size(size_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_) { size(size_); depth(depth_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) { size(size_); depth(depth_); magick(magick_); fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const std::string &magick_) { size(size_); magick(magick_); fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Geometry &size_,const std::string &imageSpec_) { size(size_); read(imageSpec_); } void Magick::Image::read(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) { MagickCore::Image *newImage; GetPPException; newImage=ConstituteImage(width_,height_,map_.c_str(),type_,pixels_, exceptionInfo); replaceImage(newImage); ThrowImageException; if (newImage) throwException(&newImage->exception,quiet()); } void Magick::Image::read(const std::string &imageSpec_) { MagickCore::Image *newImage; options()->fileName(imageSpec_); GetPPException; 
newImage=ReadImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::readPixels(const Magick::QuantumType quantum_, const unsigned char *source_) { QuantumInfo *quantum_info; GetPPException; quantum_info=AcquireQuantumInfo(imageInfo(),image()); ImportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,source_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::reduceNoise(const double order_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),NonpeakStatistic,(size_t) order_, (size_t) order_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::repage() { modifyImage(); options()->page(Geometry()); image()->page.width = 0; image()->page.height = 0; image()->page.x = 0; image()->page.y = 0; } void Magick::Image::resample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t width=columns(), height=rows(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x, &y,&width, &height); GetPPException; newImage=ResampleImage(constImage(),width,height,image()->filter,1.0, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::resize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t width=columns(), height=rows(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x, &y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,1.0, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const Geometry &roll_) { MagickCore::Image *newImage; ssize_t xOff=roll_.xOff(), yOff=roll_.yOff(); if (roll_.xNegative()) xOff=0-xOff; if (roll_.yNegative()) yOff=0-yOff; GetPPException; newImage=RollImage(constImage(),xOff,yOff,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const size_t columns_,const 
size_t rows_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),static_cast<ssize_t>(columns_), static_cast<ssize_t>(rows_),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotate(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=RotateImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlur(const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlurChannel(const ChannelType channel_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImageChannel(constImage(),channel_,angle_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=SampleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::scale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ScaleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::segment(const double clusterThreshold_, const double smoothingThreshold_) { modifyImage(); SegmentImage(image(),options()->quantizeColorSpace(), (MagickBooleanType) options()->verbose(),clusterThreshold_, smoothingThreshold_); throwImageException(); SyncImage(image()); throwImageException(); } void Magick::Image::selectiveBlur(const double radius_,const double sigma_, 
const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::selectiveBlurChannel(const ChannelType channel_, const double radius_,const double sigma_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImageChannel(constImage(),channel_,radius_,sigma_, threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::separate(const ChannelType channel_) const { MagickCore::Image *image; GetPPException; image=SeparateImage(constImage(),channel_,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::sepiaTone(const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SepiaToneImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::PixelPacket *Magick::Image::setPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_) { PixelPacket *result; modifyImage(); GetPPException; result=QueueAuthenticPixels(image(),x_, y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::shade(const double azimuth_,const double elevation_, const bool colorShading_) { MagickCore::Image *newImage; GetPPException; newImage=ShadeImage(constImage(),colorShading_ == true ? 
MagickTrue : MagickFalse,azimuth_,elevation_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shadow(const double percent_opacity_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=ShadowImage(constImage(),percent_opacity_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpenChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImageChannel(constImage(),channel_,radius_,sigma_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shave(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo shaveInfo=geometry_; GetPPException; newImage=ShaveImage(constImage(),&shaveInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shear(const double xShearAngle_,const double yShearAngle_) { MagickCore::Image *newImage; GetPPException; newImage=ShearImage(constImage(),xShearAngle_,yShearAngle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sigmoidalContrast(const size_t sharpen_, const double contrast,const double midpoint) { modifyImage(); (void) SigmoidalContrastImageChannel(image(),DefaultChannels, (MagickBooleanType) sharpen_,contrast,midpoint); throwImageException(); } std::string Magick::Image::signature(const bool force_) const { const char *property; Lock lock(&_imgRef->_mutexLock); // Re-calculate image signature if necessary if (force_ || !GetImageProperty(constImage(), "Signature") || constImage()->taint) SignatureImage(const_cast<MagickCore::Image *>(constImage())); 
property=GetImageProperty(constImage(),"Signature"); return(std::string(property)); } void Magick::Image::sketch(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=SketchImage(constImage(),radius_,sigma_,angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::solarize(const double factor_) { modifyImage(); SolarizeImage(image(),factor_); throwImageException(); } void Magick::Image::sparseColor(const ChannelType channel, const SparseColorMethod method,const size_t number_arguments, const double *arguments) { MagickCore::Image *newImage; GetPPException; newImage=SparseColorImage(constImage(),channel,method,number_arguments, arguments,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo spliceInfo=geometry_; GetPPException; newImage=SpliceImage(constImage(),&spliceInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); splice(geometry_); } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { backgroundColor(backgroundColor_); image()->gravity=gravity_; splice(geometry_); } void Magick::Image::spread(const size_t amount_) { MagickCore::Image *newImage; GetPPException; newImage=SpreadImage(constImage(),amount_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::statistics(ImageStatistics *statistics) const { double maximum, minimum; GetPPException; (void) GetImageChannelRange(constImage(),RedChannel,&minimum,&maximum, exceptionInfo); statistics->red.minimum=minimum; statistics->red.maximum=maximum; (void) GetImageChannelMean(constImage(),RedChannel,&statistics->red.mean, &statistics->red.standard_deviation,exceptionInfo); (void) 
GetImageChannelKurtosis(constImage(),RedChannel, &statistics->red.kurtosis,&statistics->red.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),GreenChannel,&minimum,&maximum, exceptionInfo); statistics->green.minimum=minimum; statistics->green.maximum=maximum; (void) GetImageChannelMean(constImage(),GreenChannel,&statistics->green.mean, &statistics->green.standard_deviation,exceptionInfo); (void) GetImageChannelKurtosis(constImage(),GreenChannel, &statistics->green.kurtosis,&statistics->green.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),BlueChannel,&minimum,&maximum, exceptionInfo); statistics->blue.minimum=minimum; statistics->blue.maximum=maximum; (void) GetImageChannelMean(constImage(),BlueChannel,&statistics->blue.mean, &statistics->blue.standard_deviation,exceptionInfo); (void) GetImageChannelKurtosis(constImage(),BlueChannel, &statistics->blue.kurtosis,&statistics->blue.skewness,exceptionInfo); (void) GetImageChannelRange(constImage(),OpacityChannel,&minimum,&maximum, exceptionInfo); statistics->opacity.minimum=minimum; statistics->opacity.maximum=maximum; (void) GetImageChannelMean(constImage(),OpacityChannel, &statistics->opacity.mean,&statistics->opacity.standard_deviation, exceptionInfo); (void) GetImageChannelKurtosis(constImage(),OpacityChannel, &statistics->opacity.kurtosis,&statistics->opacity.skewness, exceptionInfo); ThrowImageException; } void Magick::Image::stegano(const Image &watermark_) { MagickCore::Image *newImage; GetPPException; newImage=SteganoImage(constImage(),watermark_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::stereo(const Image &rightImage_) { MagickCore::Image *newImage; GetPPException; newImage=StereoImage(constImage(),rightImage_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::strip(void) { modifyImage(); StripImage(image()); throwImageException(); } Magick::Image 
Magick::Image::subImageSearch(const Image &reference_, const MetricType metric_,Geometry *offset_,double *similarityMetric_, const double similarityThreshold) { char artifact[MaxTextExtent]; MagickCore::Image *newImage; RectangleInfo offset; modifyImage(); (void) FormatLocaleString(artifact,MaxTextExtent,"%g",similarityThreshold); (void) SetImageArtifact(image(),"compare:similarity-threshold",artifact); GetPPException; newImage=SimilarityMetricImage(image(),reference_.constImage(),metric_, &offset,similarityMetric_,exceptionInfo); ThrowImageException; if (offset_ != (Geometry *) NULL) *offset_=offset; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::swirl(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=SwirlImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::syncPixels(void) { GetPPException; (void) SyncAuthenticPixels(image(),exceptionInfo); ThrowImageException; } void Magick::Image::texture(const Image &texture_) { modifyImage(); TextureImage(image(),texture_.constImage()); throwImageException(); } void Magick::Image::threshold(const double threshold_) { modifyImage(); BilevelImage(image(),threshold_); throwImageException(); } void Magick::Image::thumbnail(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ThumbnailImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::tint(const std::string opacity_) { MagickCore::Image *newImage; GetPPException; newImage=TintImage(constImage(),opacity_.c_str(),constOptions()->fillColor(), exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transform(const Geometry &imageGeometry_) { modifyImage(); 
TransformImage(&(image()),0,std::string(imageGeometry_).c_str()); throwImageException(); } void Magick::Image::transform(const Geometry &imageGeometry_, const Geometry &cropGeometry_) { modifyImage(); TransformImage(&(image()),std::string(cropGeometry_).c_str(), std::string(imageGeometry_).c_str()); throwImageException(); } void Magick::Image::transformOrigin(const double x_,const double y_) { modifyImage(); options()->transformOrigin(x_,y_); } void Magick::Image::transformReset(void) { modifyImage(); options()->transformReset(); } void Magick::Image::transformScale(const double sx_,const double sy_) { modifyImage(); options()->transformScale(sx_,sy_); } void Magick::Image::transparent(const Color &color_) { MagickPixelPacket target; std::string color; if (!color_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); color=color_; (void) QueryMagickColor(std::string(color_).c_str(),&target, &image()->exception); modifyImage(); TransparentPaintImage(image(),&target,TransparentOpacity,MagickFalse); throwImageException(); } void Magick::Image::transparentChroma(const Color &colorLow_, const Color &colorHigh_) { MagickPixelPacket targetHigh, targetLow; std::string colorHigh, colorLow; if (!colorLow_.isValid() || !colorHigh_.isValid()) throwExceptionExplicit(OptionError,"Color argument is invalid"); colorLow=colorLow_; colorHigh=colorHigh_; (void) QueryMagickColor(colorLow.c_str(),&targetLow,&image()->exception); (void) QueryMagickColor(colorHigh.c_str(),&targetHigh,&image()->exception); modifyImage(); TransparentPaintImageChroma(image(),&targetLow,&targetHigh, TransparentOpacity,MagickFalse); throwImageException(); } void Magick::Image::transpose(void) { MagickCore::Image *newImage; GetPPException; newImage=TransposeImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transverse(void) { MagickCore::Image *newImage; GetPPException; newImage=TransverseImage(constImage(),exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::trim(void) { MagickCore::Image *newImage; GetPPException; newImage=TrimImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::uniqueColors(void) const { MagickCore::Image *image; GetPPException; image=UniqueImageColors(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::unsharpmask(const double radius_,const double sigma_, const double amount_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::unsharpmaskChannel(const ChannelType channel_, const double radius_,const double sigma_,const double amount_, const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImageChannel(constImage(),channel_,radius_,sigma_, amount_,threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::vignette(const double radius_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=VignetteImage(constImage(),radius_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::wave(const double amplitude_,const double wavelength_) { MagickCore::Image *newImage; GetPPException; newImage=WaveImage(constImage(),amplitude_,wavelength_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::waveletDenoise(const double threshold_, const double softness_) { MagickCore::Image *newImage; GetPPException; newImage=WaveletDenoiseImage(constImage(),threshold_,softness_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::whiteThreshold(const std::string &threshold_) { 
modifyImage(); WhiteThresholdImage(image(),threshold_.c_str()); throwImageException(); } void Magick::Image::whiteThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; WhiteThresholdImageChannel(image(),channel_,threshold_.c_str(), exceptionInfo); ThrowImageException; } void Magick::Image::write(Blob *blob_) { size_t length=0; void *data; modifyImage(); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(Blob *blob_,const std::string &magick_) { size_t length=0; void *data; modifyImage(); magick(magick_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(Blob *blob_,const std::string &magick_, const size_t depth_) { size_t length=0; void *data; modifyImage(); magick(magick_); depth(depth_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; throwImageException(); } void Magick::Image::write(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const std::string &map_, const StorageType type_,void *pixels_) { GetPPException; ExportImagePixels(constImage(),x_,y_,columns_,rows_,map_.c_str(),type_, pixels_,exceptionInfo); ThrowImageException; } void Magick::Image::write(const std::string &imageSpec_) { modifyImage(); fileName(imageSpec_); WriteImage(constImageInfo(),image()); throwImageException(); } void Magick::Image::writePixels(const Magick::QuantumType quantum_, unsigned char *destination_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; 
ExportQuantumPixels(constImage(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,destination_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::zoom(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,image()->blur, exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image::Image(MagickCore::Image *image_) : _imgRef(new ImageRef(image_)) { } MagickCore::Image *&Magick::Image::image(void) { return(_imgRef->image()); } const MagickCore::Image *Magick::Image::constImage(void) const { return(_imgRef->image()); } MagickCore::ImageInfo *Magick::Image::imageInfo(void) { return(_imgRef->options()->imageInfo()); } const MagickCore::ImageInfo *Magick::Image::constImageInfo(void) const { return(_imgRef->options()->imageInfo()); } Magick::Options *Magick::Image::options(void) { return(_imgRef->options()); } const Magick::Options *Magick::Image::constOptions(void) const { return(_imgRef->options()); } MagickCore::QuantizeInfo *Magick::Image::quantizeInfo(void) { return(_imgRef->options()->quantizeInfo()); } const MagickCore::QuantizeInfo *Magick::Image::constQuantizeInfo(void) const { return(_imgRef->options()->quantizeInfo()); } void Magick::Image::modifyImage(void) { { Lock lock(&_imgRef->_mutexLock); if (_imgRef->_refCount == 1) return; } GetPPException; replaceImage(CloneImage(constImage(),0,0,MagickTrue,exceptionInfo)); ThrowImageException; return; } MagickCore::Image *Magick::Image::replaceImage(MagickCore::Image *replacement_) { MagickCore::Image *image; if (replacement_) image=replacement_; else image=AcquireImage(constImageInfo()); { Lock lock(&_imgRef->_mutexLock); if (_imgRef->_refCount == 1) { // We own the image, just replace it, and de-register 
_imgRef->image(image); } else { // We don't own the image, dereference and replace with copy --_imgRef->_refCount; _imgRef=new ImageRef(image,constOptions()); } } return(_imgRef->_image); } void Magick::Image::throwImageException(void) const { // Throw C++ exception while resetting Image exception to default state throwException(&const_cast<MagickCore::Image*>(constImage())->exception, quiet()); } void Magick::Image::read(MagickCore::Image *image, MagickCore::ExceptionInfo *exceptionInfo) { // Ensure that multiple image frames were not read. if (image != (MagickCore::Image *) NULL && image->next != (MagickCore::Image *) NULL) { MagickCore::Image *next; // Destroy any extra image frames next=image->next; image->next=(MagickCore::Image *) NULL; next->previous=(MagickCore::Image *) NULL; DestroyImageList(next); } replaceImage(image); if (exceptionInfo->severity == MagickCore::UndefinedException && image == (MagickCore::Image *) NULL) { (void) MagickCore::DestroyExceptionInfo(exceptionInfo); throwExceptionExplicit(ImageWarning,"No image was loaded."); } ThrowImageException; if (image != (MagickCore::Image *) NULL) throwException(&image->exception,quiet()); } void Magick::Image::floodFill(const ssize_t x_,const ssize_t y_, const Magick::Image *fillPattern_,const Magick::Color &fill_, const MagickCore::PixelPacket *target_,const bool invert_) { Magick::Color fillColor; MagickCore::Image *fillPattern; MagickPixelPacket target; // Set drawing fill pattern or fill color fillColor=options()->fillColor(); fillPattern=(MagickCore::Image *)NULL; if (options()->fillPattern() != (MagickCore::Image *)NULL) { GetPPException; fillPattern=CloneImage(options()->fillPattern(),0,0,MagickTrue, exceptionInfo); ThrowImageException; } if (fillPattern_ == (Magick::Image *)NULL) { options()->fillPattern((MagickCore::Image *)NULL); options()->fillColor(fill_); } else options()->fillPattern(fillPattern_->constImage()); GetMagickPixelPacket(image(),&target); target.red=target_->red; 
target.green=target_->green; target.blue=target_->blue; (void) FloodfillPaintImage(image(),DefaultChannels,options()->drawInfo(), &target,static_cast<ssize_t>(x_),static_cast<ssize_t>(y_), (MagickBooleanType) invert_); options()->fillColor(fillColor); options()->fillPattern(fillPattern); throwImageException(); }
./CrossVul/dataset_final_sorted/CWE-416/cpp/bad_2969_0
crossvul-cpp_data_good_856_0
/* * Copyright (c) 2017, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include <proxygen/lib/http/codec/compress/HeaderTable.h> #include <glog/logging.h> using std::list; using std::pair; using std::string; namespace proxygen { void HeaderTable::init(uint32_t capacityVal) { bytes_ = 0; size_ = 0; head_ = 0; capacity_ = capacityVal; // at a minimum an entry will take 32 bytes uint32_t length = (capacityVal >> 5) + 1; table_.assign(length, HPACKHeader()); names_.clear(); } bool HeaderTable::add(const HPACKHeader& header) { // handle size overflow if (bytes_ + header.bytes() > capacity_) { evict(header.bytes()); } // this means the header entry is larger than our table if (bytes_ + header.bytes() > capacity_) { return false; } if (size_ > 0) { head_ = next(head_); } table_[head_] = header; // index name names_[header.name].push_back(head_); bytes_ += header.bytes(); ++size_; return true; } uint32_t HeaderTable::getIndex(const HPACKHeader& header) const { auto it = names_.find(header.name); if (it == names_.end()) { return 0; } for (auto i : it->second) { if (table_[i].value == header.value) { return toExternal(i); } } return 0; } bool HeaderTable::hasName(const std::string& name) { return names_.find(name) != names_.end(); } uint32_t HeaderTable::nameIndex(const std::string& name) const { auto it = names_.find(name); if (it == names_.end()) { return 0; } return toExternal(it->second.back()); } const HPACKHeader& HeaderTable::operator[](uint32_t i) const { CHECK(isValid(i)); return table_[toInternal(i)]; } bool HeaderTable::inReferenceSet(uint32_t index) const { return refset_.find(toInternal(index)) != refset_.end(); } bool HeaderTable::isSkippedReference(uint32_t index) const { return skippedRefs_.find(toInternal(index)) != skippedRefs_.end(); } 
void HeaderTable::clearSkippedReferences() { skippedRefs_.clear(); } void HeaderTable::addSkippedReference(uint32_t index) { skippedRefs_.insert(toInternal(index)); } void HeaderTable::addReference(uint32_t index) { refset_.insert(toInternal(index)); } void HeaderTable::removeReference(uint32_t index) { refset_.erase(toInternal(index)); } void HeaderTable::clearReferenceSet() { refset_.clear(); } list<uint32_t> HeaderTable::referenceSet() const { list<uint32_t> external; for (auto& i : refset_) { external.push_back(toExternal(i)); } // seems like the compiler will avoid the copy here return external; } void HeaderTable::removeLast() { auto t = tail(); refset_.erase(t); skippedRefs_.erase(t); // remove the first element from the names index auto names_it = names_.find(table_[t].name); DCHECK(names_it != names_.end()); list<uint32_t> &ilist = names_it->second; DCHECK(ilist.front() ==t); ilist.pop_front(); // remove the name if there are no indices associated with it if (ilist.empty()) { names_.erase(names_it); } bytes_ -= table_[t].bytes(); --size_; } void HeaderTable::setCapacity(uint32_t capacity) { // TODO: ddmello - the below is a little dangerous as we update the // capacity right away. Some properties of the class utilize that variable // and so might be better to refactor and update capacity at the end of the // method (and update other methods) auto oldCapacity = capacity_; capacity_ = capacity; if (capacity_ == oldCapacity) { return; } else if (capacity_ < oldCapacity) { // NOTE: currently no actual resizing is performed... 
evict(0); } else { // NOTE: due to the above lack of resizing, we must determine whether a // resize is actually appropriate (to handle cases where the underlying // vector is still >= to the size related to the new capacity requested) uint32_t newLength = (capacity_ >> 5) + 1; if (newLength > table_.size()) { auto oldTail = tail(); auto oldLength = table_.size(); table_.resize(newLength); if (size_ > 0 && oldTail > head_) { // the list wrapped around, need to move oldTail..oldLength to the end // of the now-larger table_ std::copy(table_.begin() + oldTail, table_.begin() + oldLength, table_.begin() + newLength - (oldLength - oldTail)); // Update the names indecies that pointed to the old range for (auto& names_it: names_) { for (auto& idx: names_it.second) { if (idx >= oldTail) { DCHECK_LT(idx + (table_.size() - oldLength), table_.size()); idx += (table_.size() - oldLength); } else { // remaining indecies in the list were smaller than oldTail, so // should be indexed from 0 break; } } } } } } } uint32_t HeaderTable::evict(uint32_t needed) { uint32_t evicted = 0; while (size_ > 0 && (bytes_ + needed > capacity_)) { removeLast(); ++evicted; } return evicted; } bool HeaderTable::isValid(uint32_t index) const { return 0 < index && index <= size_; } uint32_t HeaderTable::next(uint32_t i) const { return (i + 1) % table_.size(); } uint32_t HeaderTable::tail() const { return (head_ + table_.size() - size_ + 1) % table_.size(); } uint32_t HeaderTable::toExternal(uint32_t internalIndex) const { return toExternal(head_, table_.size(), internalIndex); } uint32_t HeaderTable::toExternal(uint32_t head, uint32_t length, uint32_t internalIndex) { return ((head + length - internalIndex) % length) + 1; } uint32_t HeaderTable::toInternal(uint32_t externalIndex) const { return toInternal(head_, table_.size(), externalIndex); } uint32_t HeaderTable::toInternal(uint32_t head, uint32_t length, uint32_t externalIndex) { // remove the offset --externalIndex; return (head + length - 
externalIndex) % length; } bool HeaderTable::operator==(const HeaderTable& other) const { if (size() != other.size()) { return false; } if (bytes() != other.bytes()) { return false; } list<uint32_t> refset = referenceSet(); refset.sort(); list<uint32_t> otherRefset = other.referenceSet(); otherRefset.sort(); if (refset != otherRefset) { return false; } return true; } std::ostream& operator<<(std::ostream& os, const HeaderTable& table) { os << std::endl; for (size_t i = 1; i <= table.size(); i++) { const HPACKHeader& h = table[i]; os << '[' << i << "] (s=" << h.bytes() << ") " << h.name << ": " << h.value << std::endl; } os << "reference set: ["; for (const auto& index : table.referenceSet()) { os << index << ", "; } os << "]" << std::endl; os << "total size: " << table.bytes() << std::endl; return os; } }
./CrossVul/dataset_final_sorted/CWE-416/cpp/good_856_0
crossvul-cpp_data_bad_856_0
/* * Copyright (c) 2017, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include <proxygen/lib/http/codec/compress/HeaderTable.h> #include <glog/logging.h> using std::list; using std::pair; using std::string; namespace proxygen { void HeaderTable::init(uint32_t capacityVal) { bytes_ = 0; size_ = 0; head_ = 0; capacity_ = capacityVal; // at a minimum an entry will take 32 bytes uint32_t length = (capacityVal >> 5) + 1; table_.assign(length, HPACKHeader()); names_.clear(); } bool HeaderTable::add(const HPACKHeader& header) { // handle size overflow if (bytes_ + header.bytes() > capacity_) { evict(header.bytes()); } // this means the header entry is larger than our table if (bytes_ + header.bytes() > capacity_) { return false; } if (size_ > 0) { head_ = next(head_); } table_[head_] = header; // index name names_[header.name].push_back(head_); bytes_ += header.bytes(); ++size_; return true; } uint32_t HeaderTable::getIndex(const HPACKHeader& header) const { auto it = names_.find(header.name); if (it == names_.end()) { return 0; } for (auto i : it->second) { if (table_[i].value == header.value) { return toExternal(i); } } return 0; } bool HeaderTable::hasName(const std::string& name) { return names_.find(name) != names_.end(); } uint32_t HeaderTable::nameIndex(const std::string& name) const { auto it = names_.find(name); if (it == names_.end()) { return 0; } return toExternal(it->second.back()); } const HPACKHeader& HeaderTable::operator[](uint32_t i) const { CHECK(isValid(i)); return table_[toInternal(i)]; } bool HeaderTable::inReferenceSet(uint32_t index) const { return refset_.find(toInternal(index)) != refset_.end(); } bool HeaderTable::isSkippedReference(uint32_t index) const { return skippedRefs_.find(toInternal(index)) != skippedRefs_.end(); } 
void HeaderTable::clearSkippedReferences() { skippedRefs_.clear(); } void HeaderTable::addSkippedReference(uint32_t index) { skippedRefs_.insert(toInternal(index)); } void HeaderTable::addReference(uint32_t index) { refset_.insert(toInternal(index)); } void HeaderTable::removeReference(uint32_t index) { refset_.erase(toInternal(index)); } void HeaderTable::clearReferenceSet() { refset_.clear(); } list<uint32_t> HeaderTable::referenceSet() const { list<uint32_t> external; for (auto& i : refset_) { external.push_back(toExternal(i)); } // seems like the compiler will avoid the copy here return external; } void HeaderTable::removeLast() { auto t = tail(); refset_.erase(t); skippedRefs_.erase(t); // remove the first element from the names index auto names_it = names_.find(table_[t].name); DCHECK(names_it != names_.end()); list<uint32_t> &ilist = names_it->second; DCHECK(ilist.front() ==t); ilist.pop_front(); // remove the name if there are no indices associated with it if (ilist.empty()) { names_.erase(names_it); } bytes_ -= table_[t].bytes(); --size_; } void HeaderTable::setCapacity(uint32_t capacity) { auto oldCapacity = capacity_; capacity_ = capacity; if (capacity_ <= oldCapacity) { evict(0); } else { auto oldTail = tail(); auto oldLength = table_.size(); uint32_t newLength = (capacity_ >> 5) + 1; table_.resize(newLength); if (size_ > 0 && oldTail > head_) { // the list wrapped around, need to move oldTail..oldLength to the end of // the now-larger table_ std::copy(table_.begin() + oldTail, table_.begin() + oldLength, table_.begin() + newLength - (oldLength - oldTail)); // Update the names indecies that pointed to the old range for (auto& names_it: names_) { for (auto& idx: names_it.second) { if (idx >= oldTail) { DCHECK_LT(idx + (table_.size() - oldLength), table_.size()); idx += (table_.size() - oldLength); } else { // remaining indecies in the list were smaller than oldTail, so // should be indexed from 0 break; } } } } } } uint32_t HeaderTable::evict(uint32_t 
needed) { uint32_t evicted = 0; while (size_ > 0 && (bytes_ + needed > capacity_)) { removeLast(); ++evicted; } return evicted; } bool HeaderTable::isValid(uint32_t index) const { return 0 < index && index <= size_; } uint32_t HeaderTable::next(uint32_t i) const { return (i + 1) % table_.size(); } uint32_t HeaderTable::tail() const { return (head_ + table_.size() - size_ + 1) % table_.size(); } uint32_t HeaderTable::toExternal(uint32_t internalIndex) const { return toExternal(head_, table_.size(), internalIndex); } uint32_t HeaderTable::toExternal(uint32_t head, uint32_t length, uint32_t internalIndex) { return ((head + length - internalIndex) % length) + 1; } uint32_t HeaderTable::toInternal(uint32_t externalIndex) const { return toInternal(head_, table_.size(), externalIndex); } uint32_t HeaderTable::toInternal(uint32_t head, uint32_t length, uint32_t externalIndex) { // remove the offset --externalIndex; return (head + length - externalIndex) % length; } bool HeaderTable::operator==(const HeaderTable& other) const { if (size() != other.size()) { return false; } if (bytes() != other.bytes()) { return false; } list<uint32_t> refset = referenceSet(); refset.sort(); list<uint32_t> otherRefset = other.referenceSet(); otherRefset.sort(); if (refset != otherRefset) { return false; } return true; } std::ostream& operator<<(std::ostream& os, const HeaderTable& table) { os << std::endl; for (size_t i = 1; i <= table.size(); i++) { const HPACKHeader& h = table[i]; os << '[' << i << "] (s=" << h.bytes() << ") " << h.name << ": " << h.value << std::endl; } os << "reference set: ["; for (const auto& index : table.referenceSet()) { os << index << ", "; } os << "]" << std::endl; os << "total size: " << table.bytes() << std::endl; return os; } }
./CrossVul/dataset_final_sorted/CWE-416/cpp/bad_856_0
crossvul-cpp_data_good_856_1
/* * Copyright (c) 2017, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include <folly/portability/GTest.h> #include <memory> #include <proxygen/lib/http/codec/compress/HeaderTable.h> #include <proxygen/lib/http/codec/compress/Logging.h> #include <sstream> using namespace std; using namespace testing; namespace proxygen { class HeaderTableTests : public testing::Test { protected: void xcheck(uint32_t internal, uint32_t external) { EXPECT_EQ(HeaderTable::toExternal(head_, length_, internal), external); EXPECT_EQ(HeaderTable::toInternal(head_, length_, external), internal); } void resizeTable(HeaderTable& table, uint32_t newCapacity, uint32_t newMax) { table.setCapacity(newCapacity); // On resizing the table size (count of headers) remains the same or sizes // down; can not size up EXPECT_LE(table.size(), newMax); } void resizeAndFillTable( HeaderTable& table, HPACKHeader& header, uint32_t newMax, uint32_t fillCount) { uint32_t newCapacity = header.bytes() * newMax; resizeTable(table, newCapacity, newMax); // Fill the table (with one extra) and make sure we haven't violated our // size (bytes) limits (expected one entry to be evicted) for (size_t i = 0; i <= fillCount; ++i) { EXPECT_EQ(table.add(header), true); } EXPECT_EQ(table.size(), newMax); EXPECT_EQ(table.bytes(), newCapacity); } uint32_t head_{0}; uint32_t length_{0}; }; TEST_F(HeaderTableTests, index_translation) { // simple cases length_ = 10; head_ = 5; xcheck(0, 6); xcheck(3, 3); xcheck(5, 1); // wrap head_ = 1; xcheck(0, 2); xcheck(8, 4); xcheck(5, 7); } TEST_F(HeaderTableTests, add) { HeaderTable table(4096); table.add(HPACKHeader("accept-encoding", "gzip")); table.add(HPACKHeader("accept-encoding", "gzip")); table.add(HPACKHeader("accept-encoding", "gzip")); 
EXPECT_EQ(table.names().size(), 1); EXPECT_EQ(table.hasName("accept-encoding"), true); auto it = table.names().find("accept-encoding"); EXPECT_EQ(it->second.size(), 3); EXPECT_EQ(table.nameIndex("accept-encoding"), 1); } TEST_F(HeaderTableTests, evict) { HPACKHeader accept("accept-encoding", "gzip"); HPACKHeader accept2("accept-encoding", "----"); // same size, different header HPACKHeader accept3("accept-encoding", "third"); // size is larger with 1 byte uint32_t max = 10; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); // fill the table for (size_t i = 0; i < max; i++) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table.add(accept2), true); // evict the first one EXPECT_EQ(table[1], accept2); auto ilist = table.names().find("accept-encoding")->second; EXPECT_EQ(ilist.size(), max); // evict all the 'accept' headers for (size_t i = 0; i < max - 1; i++) { EXPECT_EQ(table.add(accept2), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table[max], accept2); EXPECT_EQ(table.names().size(), 1); // add an entry that will cause 2 evictions EXPECT_EQ(table.add(accept3), true); EXPECT_EQ(table[1], accept3); EXPECT_EQ(table.size(), max - 1); // add a super huge header string bigvalue; bigvalue.append(capacity, 'x'); HPACKHeader bigheader("user-agent", bigvalue); EXPECT_EQ(table.add(bigheader), false); EXPECT_EQ(table.size(), 0); EXPECT_EQ(table.names().size(), 0); } TEST_F(HeaderTableTests, reduce_capacity) { HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 10; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); EXPECT_GT(table.length(), max); // fill the table for (size_t i = 0; i < max; i++) { EXPECT_EQ(table.add(accept), true); } // change capacity table.setCapacity(capacity / 2); EXPECT_EQ(table.size(), max / 2); EXPECT_EQ(table.bytes(), capacity / 2); } TEST_F(HeaderTableTests, comparison) { uint32_t capacity = 128; HeaderTable t1(capacity); HeaderTable t2(capacity); HPACKHeader 
h1("Content-Encoding", "gzip"); HPACKHeader h2("Content-Encoding", "deflate"); // different in number of elements t1.add(h1); EXPECT_FALSE(t1 == t2); // different in size (bytes) t2.add(h2); EXPECT_FALSE(t1 == t2); // make them the same t1.add(h2); t2.add(h1); EXPECT_TRUE(t1 == t2); // make them mismatch on refset t1.addReference(1); EXPECT_FALSE(t1 == t2); } TEST_F(HeaderTableTests, print) { stringstream out; HeaderTable t(128); t.add(HPACKHeader("Accept-Encoding", "gzip")); t.addReference(1); out << t; EXPECT_EQ(out.str(), "\n[1] (s=51) Accept-Encoding: gzip\nreference set: [1, ]\ntotal size: 51\n"); } TEST_F(HeaderTableTests, increaseCapacity) { HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 4; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); EXPECT_GT(table.length(), max); // fill the table for (size_t i = 0; i < table.length() + 1; i++) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table.getIndex(accept), 4); // head should be 0, tail should be 2 max = 8; table.setCapacity(accept.bytes() * max); EXPECT_GT(table.length(), max); // external index didn't change EXPECT_EQ(table.getIndex(accept), 4); } TEST_F(HeaderTableTests, varyCapacity) { HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 6; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); // Fill the table (extra) and make sure we haven't violated our // size (bytes) limits (expected one entry to be evicted) for (size_t i = 0; i <= table.length(); ++i) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); // Size down the table and verify we are still honoring our size (bytes) // limits resizeAndFillTable(table, accept, 4, 5); // Size up the table (in between previous max and min within test) and verify // we are still horing our size (bytes) limits resizeAndFillTable(table, accept, 5, 6); // Finally reize up one last timestamps resizeAndFillTable(table, accept, 8, 9); } 
TEST_F(HeaderTableTests, varyCapacityMalignHeadIndex) { // Test checks for a previous bug/crash condition where due to resizing // the underlying table to a size lower than a previous max but up from the // current size and the position of the head_ index an out of bounds index // would occur // Initialize header table HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 6; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); // Push head_ to last index in underlying table before potential wrap // This is our max table size for the duration of the test for (size_t i = 0; i < table.length(); ++i) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table.bytes(), capacity); // Flush underlying table (head_ remains the same at the previous max index) // Header guranteed to cause a flush as header itself requires 32 bytes plus // the sizes of the name and value anyways (which themselves would cause a // flush) string strLargerThanTableCapacity = string(capacity + 1, 'a'); HPACKHeader flush("flush", strLargerThanTableCapacity); EXPECT_EQ(table.add(flush), false); EXPECT_EQ(table.size(), 0); // Now reduce capacity of table (in functional terms table.size() is lowered // but currently table.length() remains the same) max = 3; resizeTable(table, accept.bytes() * max, max); // Increase capacity of table (but smaller than all time max; head_ still at // previous max index). Previously (now fixed) this size up resulted in // incorrect resizing semantics max = 4; resizeTable(table, accept.bytes() * max, max); // Now try and add headers; there should be no crash with current position of // head_ in the underlying table. Note this is merely one possible way we // could force the test to crash as a result of the resize bug this test was // added for for (size_t i = 0; i <= table.length(); ++i) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); } }
./CrossVul/dataset_final_sorted/CWE-416/cpp/good_856_1
crossvul-cpp_data_good_4222_0
/* * Copyright (c) 2009, The MilkyTracker Team. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name of the <ORGANIZATION> nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PlayerGeneric.cpp * MilkyPlay * * PlayerGeneric is a wrapper that allocates a suiting type of player * for a module while providing the same player interfaces. 
* Currently there are three types of players: PlayerFAR, PlayerSTD and PlayerIT * */ #include "PlayerGeneric.h" #include "MasterMixer.h" #include "XModule.h" #include "AudioDriver_WAVWriter.h" #include "AudioDriverManager.h" #include "PlayerBase.h" #include "PlayerSTD.h" #ifndef MILKYTRACKER #include "PlayerIT.h" #include "PlayerFAR.h" #endif #undef __VERBOSE__ class MixerNotificationListener : public MasterMixer::MasterMixerNotificationListener { private: class PlayerGeneric& player; public: MixerNotificationListener(PlayerGeneric& player) : player(player) { } virtual void masterMixerNotification(MasterMixer::MasterMixerNotifications notification) { player.adjustSettings(); } }; void PlayerGeneric::adjustSettings() { mp_uint32 bufferSize = mixer->getBufferSize(); mp_uint32 sampleRate = mixer->getSampleRate(); this->bufferSize = bufferSize; this->frequency = sampleRate; if (player) { player->setBufferSize(bufferSize); player->adjustFrequency(sampleRate); } } PlayerBase::PlayerTypes PlayerGeneric::getPreferredPlayerType(XModule* module) { if (module == NULL) return PlayerBase::PlayerType_INVALID; switch (module->getType()) { case XModule::ModuleType_669: case XModule::ModuleType_FAR: #ifndef MILKYTRACKER return PlayerBase::PlayerType_FAR; break; #endif case XModule::ModuleType_IT: #ifndef MILKYTRACKER return PlayerBase::PlayerType_IT; break; #endif case XModule::ModuleType_UNKNOWN: // just assume our standard player can handle this //case XModule::ModuleType_669: case XModule::ModuleType_AMF: case XModule::ModuleType_AMS: case XModule::ModuleType_CBA: case XModule::ModuleType_DBM: case XModule::ModuleType_DIGI: case XModule::ModuleType_DSM: case XModule::ModuleType_DSm: case XModule::ModuleType_DTM_1: case XModule::ModuleType_DTM_2: case XModule::ModuleType_GDM: case XModule::ModuleType_GMC: case XModule::ModuleType_IMF: case XModule::ModuleType_MDL: case XModule::ModuleType_MOD: case XModule::ModuleType_MTM: case XModule::ModuleType_MXM: case 
XModule::ModuleType_OKT: case XModule::ModuleType_PLM: case XModule::ModuleType_PSM: case XModule::ModuleType_PTM: case XModule::ModuleType_S3M: case XModule::ModuleType_STM: case XModule::ModuleType_SFX: case XModule::ModuleType_UNI: case XModule::ModuleType_ULT: case XModule::ModuleType_XM: case XModule::ModuleType_NONE: return PlayerBase::PlayerType_Generic; break; default: return PlayerBase::PlayerType_INVALID; } } PlayerBase* PlayerGeneric::getPreferredPlayer(XModule* module) const { switch (getPreferredPlayerType(module)) { #ifndef MILKYTRACKER case PlayerBase::PlayerType_FAR: return new PlayerFAR(frequency); case PlayerBase::PlayerType_IT: return new PlayerIT(frequency); #endif case PlayerBase::PlayerType_Generic: return new PlayerSTD(frequency); default: return NULL; } } PlayerGeneric::PlayerGeneric(mp_sint32 frequency, AudioDriverInterface* audioDriver/* = NULL*/) : mixer(NULL), player(NULL), frequency(frequency), audioDriver(audioDriver), audioDriverName(NULL) { listener = new MixerNotificationListener(*this); bufferSize = 0; sampleShift = 0; resamplerType = MIXER_NORMAL; idle = false; playOneRowOnly = false; paused = false; repeat = false; resetOnStopFlag = false; autoAdjustPeak = false; disableMixing = false; allowFilters = false; #ifdef __FORCEPOWEROFTWOBUFFERSIZE__ compensateBufferFlag = true; #else compensateBufferFlag = false; #endif masterVolume = panningSeparation = numMaxVirChannels = 256; resetMainVolumeOnStartPlayFlag = true; playMode = PlayMode_Auto; // Special playmode settings options[PlayModeOptionPanning8xx] = true; options[PlayModeOptionPanningE8x] = false; options[PlayModeOptionForcePTPitchLimit] = true; AudioDriverManager audioDriverManager; const char* defaultName = audioDriverManager.getPreferredAudioDriver()->getDriverID(); if (defaultName) { audioDriverName = new char[strlen(defaultName)+1]; strcpy(audioDriverName, defaultName); } } PlayerGeneric::~PlayerGeneric() { if (player) { if (mixer && mixer->isActive() && 
!mixer->isDeviceRemoved(player)) mixer->removeDevice(player); delete player; } if (mixer) delete mixer; delete[] audioDriverName; delete listener; } // -- wrapping mixer specific stuff ---------------------- void PlayerGeneric::setResamplerType(ResamplerTypes type) { resamplerType = type; if (player) player->setResamplerType(type); } void PlayerGeneric::setResamplerType(bool interpolation, bool ramping) { if (interpolation) { if (ramping) resamplerType = MIXER_LERPING_RAMPING; else resamplerType = MIXER_LERPING; } else { if (ramping) resamplerType = MIXER_NORMAL_RAMPING; else resamplerType = MIXER_NORMAL; } if (player) player->setResamplerType(resamplerType); } ChannelMixer::ResamplerTypes PlayerGeneric::getResamplerType() const { if (player) return player->getResamplerType(); return resamplerType; } void PlayerGeneric::setSampleShift(mp_sint32 shift) { sampleShift = shift; if (mixer) mixer->setSampleShift(shift); } mp_sint32 PlayerGeneric::getSampleShift() const { if (mixer) return mixer->getSampleShift(); return sampleShift; } void PlayerGeneric::setPeakAutoAdjust(bool b) { this->autoAdjustPeak = b; } mp_sint32 PlayerGeneric::adjustFrequency(mp_uint32 frequency) { this->frequency = frequency; mp_sint32 res = MP_OK; if (mixer) res = mixer->setSampleRate(frequency); return res; } mp_sint32 PlayerGeneric::getMixFrequency() const { if (player) return player->getMixFrequency(); return frequency; } mp_sint32 PlayerGeneric::beatPacketsToBufferSize(mp_uint32 numBeats) { return ChannelMixer::beatPacketsToBufferSize(getMixFrequency(), numBeats); } mp_sint32 PlayerGeneric::adjustBufferSize(mp_uint32 numBeats) { return setBufferSize(beatPacketsToBufferSize(numBeats)); } mp_sint32 PlayerGeneric::setBufferSize(mp_uint32 bufferSize) { mp_sint32 res = 0; this->bufferSize = bufferSize; if (mixer) { // If we're told to compensate the samples until we // we reached 2^n buffer sizes if (compensateBufferFlag) { for (mp_uint32 i = 0; i < 16; i++) { if ((unsigned)(1 << i) >= 
(unsigned)bufferSize) { bufferSize = 1 << i; break; } } } res = mixer->setBufferSize(bufferSize); } return res; } mp_sint32 PlayerGeneric::setPowerOfTwoCompensationFlag(bool b) { if (mixer && compensateBufferFlag != b) { compensateBufferFlag = b; setBufferSize(bufferSize); } return MP_OK; } bool PlayerGeneric::getPowerOfTwoCompensationFlag() const { return compensateBufferFlag; } const char* PlayerGeneric::getCurrentAudioDriverName() const { if (mixer) return mixer->getCurrentAudioDriverName(); return audioDriverName; } bool PlayerGeneric::setCurrentAudioDriverByName(const char* name) { if (name == NULL) return false; if (mixer) { bool res = mixer->setCurrentAudioDriverByName(name); if (audioDriverName) delete[] audioDriverName; const char* curDrvName = getCurrentAudioDriverName(); ASSERT(curDrvName); audioDriverName = new char[strlen(curDrvName)+1]; strcpy(audioDriverName, curDrvName); return res; } AudioDriverManager audioDriverManager; if (audioDriverManager.getAudioDriverByName(name)) { if (audioDriverName) delete[] audioDriverName; audioDriverName = new char[strlen(name)+1]; strcpy(audioDriverName, name); return true; } return false; } bool PlayerGeneric::isInitialized() const { if (mixer) return mixer->isInitialized(); return false; } bool PlayerGeneric::isPlaying() const { if (mixer) return mixer->isPlaying(); return false; } mp_int64 PlayerGeneric::getSampleCounter() const { if (player) return player->getSampleCounter(); return 0; } void PlayerGeneric::resetSampleCounter() { if (player) player->resetSampleCounter(); } mp_sint32 PlayerGeneric::getCurrentSamplePosition() const { if (mixer && mixer->getAudioDriver()) return mixer->getAudioDriver()->getBufferPos(); return 0; } mp_sint32 PlayerGeneric::getCurrentBeatIndex() { if (player) return player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return 0; } mp_sint32 PlayerGeneric::getCurrentSample(mp_sint32 position, mp_sint32 channel) { if (mixer) return mixer->getCurrentSample(position, channel); 
return 0; } mp_sint32 PlayerGeneric::getCurrentSamplePeak(mp_sint32 position, mp_sint32 channel) { if (mixer) return mixer->getCurrentSamplePeak(position, channel); return 0; } void PlayerGeneric::resetChannels() { if (player) player->resetChannelsFull(); } mp_sint32 PlayerGeneric::getNumAllocatedChannels() const { if (player) return player->getNumAllocatedChannels(); return 0; } mp_sint32 PlayerGeneric::getNumActiveChannels() const { if (player) return player->getNumActiveChannels(); return 0; } // -- wrapping player specific stuff ---------------------- void PlayerGeneric::setPlayMode(PlayModes mode) { playMode = mode; if (player) player->setPlayMode(mode); } PlayerGeneric::PlayModes PlayerGeneric::getPlayMode() const { if (player) return player->getPlayMode(); return playMode; } void PlayerGeneric::enable(PlayModeOptions option, bool b) { ASSERT(option>=PlayModeOptionFirst && option<PlayModeOptionLast); options[option] = b; if (player) player->enable(option, b); } bool PlayerGeneric::isEnabled(PlayModeOptions option) const { ASSERT(option>=PlayModeOptionFirst && option<PlayModeOptionLast); if (!player) return options[option]; else return player->isEnabled(option); } void PlayerGeneric::restart(mp_uint32 startPosition/* = 0*/, mp_uint32 startRow/* = 0*/, bool resetMixer/* = true*/, const mp_ubyte* customPanningTable/* = NULL*/, bool playOneRowOnly/* = false*/) { if (player) player->restart(startPosition, startRow, resetMixer, customPanningTable, playOneRowOnly); } void PlayerGeneric::reset() { if (player) player->reset(); } void PlayerGeneric::resetAllSpeed() { if (player) player->resetAllSpeed(); } mp_sint32 PlayerGeneric::startPlaying(XModule* module, bool repeat/* = false*/, mp_uint32 startPosition/* = 0*/, mp_uint32 startRow/* = 0*/, mp_sint32 numChannels/* = -1*/, const mp_ubyte* customPanningTable/* = NULL*/, bool idle/* = false*/, mp_sint32 patternIndex/* = -1*/, bool playOneRowOnly/* = false*/) { this->idle = idle; this->repeat = repeat; 
this->playOneRowOnly = playOneRowOnly; if (mixer == NULL) { mixer = new MasterMixer(frequency, bufferSize, 1, audioDriver); mixer->setMasterMixerNotificationListener(listener); mixer->setSampleShift(sampleShift); if (audioDriver == NULL) mixer->setCurrentAudioDriverByName(audioDriverName); } if (!player || player->getType() != getPreferredPlayerType(module)) { if (player) { if (!mixer->isDeviceRemoved(player)) mixer->removeDevice(player); delete player; } player = getPreferredPlayer(module); if (player) { // apply our own "state" to the state of the newly allocated player player->resetMainVolumeOnStartPlay(resetMainVolumeOnStartPlayFlag); player->resetOnStop(resetOnStopFlag); player->setBufferSize(bufferSize); player->setResamplerType(resamplerType); player->setMasterVolume(masterVolume); player->setPanningSeparation(panningSeparation); player->setPlayMode(playMode); for (mp_sint32 i = PlayModeOptionFirst; i < PlayModeOptionLast; i++) player->enable((PlayModeOptions)i, options[i]); player->setDisableMixing(disableMixing); player->setAllowFilters(allowFilters); //if (paused) // player->pausePlaying(); // adjust number of virtual channels if necessary setNumMaxVirChannels(numMaxVirChannels); } } if (player && mixer) { if (!mixer->isDeviceRemoved(player)) mixer->removeDevice(player); player->startPlaying(module, repeat, startPosition, startRow, numChannels, customPanningTable, idle, patternIndex, playOneRowOnly); mixer->addDevice(player); if (!mixer->isPlaying()) return mixer->start(); } return MP_OK; } void PlayerGeneric::setPatternToPlay(mp_sint32 patternIndex) { if (player) player->setPatternToPlay(patternIndex); } mp_sint32 PlayerGeneric::stopPlaying() { if (player) player->stopPlaying(); if (mixer) return mixer->stop(); return MP_OK; } bool PlayerGeneric::hasSongHalted() const { if (player) return player->hasSongHalted(); return true; } void PlayerGeneric::setIdle(bool idle) { this->idle = idle; if (player) player->setIdle(idle); } bool PlayerGeneric::isIdle() 
const { if (player) return player->isIdle(); return idle; } void PlayerGeneric::setRepeat(bool repeat) { this->repeat = repeat; if (player) player->setRepeat(repeat); } bool PlayerGeneric::isRepeating() const { if (player) return player->isRepeating(); return repeat; } mp_sint32 PlayerGeneric::pausePlaying() { paused = true; if (mixer) return mixer->pause(); return MP_OK; } mp_sint32 PlayerGeneric::resumePlaying() { if (player && !player->isPlaying()) player->resumePlaying(); if (mixer && mixer->isPaused()) return mixer->resume(); else if (mixer && !mixer->isPlaying()) return mixer->start(); return MP_OK; } bool PlayerGeneric::isPaused() const { if (mixer) return mixer->isPaused(); return paused; } void PlayerGeneric::setDisableMixing(bool b) { disableMixing = b; if (player) player->setDisableMixing(disableMixing); } void PlayerGeneric::setAllowFilters(bool b) { allowFilters = b; if (player) player->setAllowFilters(allowFilters); } bool PlayerGeneric::getAllowFilters() const { if (player) return player->getAllowFilters(); return allowFilters; } // volume control void PlayerGeneric::setMasterVolume(mp_sint32 vol) { masterVolume = vol; if (player) player->setMasterVolume(vol); } mp_sint32 PlayerGeneric::getMasterVolume() const { if (player) return player->getMasterVolume(); return masterVolume; } // panning control void PlayerGeneric::setPanningSeparation(mp_sint32 separation) { panningSeparation = separation; if (player) player->setPanningSeparation(separation); } mp_sint32 PlayerGeneric::getPanningSeparation() const { if (player) return player->getPanningSeparation(); return panningSeparation; } mp_sint32 PlayerGeneric::getSongMainVolume() const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return player->getSongMainVolume(index); } return 255; } mp_sint32 PlayerGeneric::getRow() const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return player->getRow(index); } 
return 0; } mp_sint32 PlayerGeneric::getOrder() const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return player->getOrder(index); } return 0; } void PlayerGeneric::getPosition(mp_sint32& order, mp_sint32& row) const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); player->getPosition(order, row, index); return; } order = row = 0; } mp_sint32 PlayerGeneric::getLastUnvisitedPosition() const { if (player) return player->getLastUnvisitedPosition(); return 0; } void PlayerGeneric::getPosition(mp_sint32& order, mp_sint32& row, mp_sint32& ticker) const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); player->getPosition(order, row, ticker, index); return; } order = row = ticker = 0; } mp_int64 PlayerGeneric::getSyncCount() const { if (player) return player->getSyncCount(); return 0; } mp_uint32 PlayerGeneric::getSyncSampleCounter() const { if (player) return player->getSyncSampleCounter(); return 0; } void PlayerGeneric::nextPattern() { if (player) player->nextPattern(); } void PlayerGeneric::lastPattern() { if (player) player->lastPattern(); } void PlayerGeneric::setPatternPos(mp_uint32 pos, mp_uint32 row/* = 0*/, bool resetChannels/* = true*/, bool resetFXMemory/* = true*/) { if (player) player->setPatternPos(pos, row, resetChannels, resetFXMemory); } mp_sint32 PlayerGeneric::getTempo() const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return player->getTempo(index); } return 0; } mp_sint32 PlayerGeneric::getSpeed() const { if (player) { mp_uint32 index = player->getBeatIndexFromSamplePos(getCurrentSamplePosition()); return player->getSpeed(index); } return 0; } void PlayerGeneric::resetOnStop(bool b) { resetOnStopFlag = b; if (player) player->resetOnStop(b); } void PlayerGeneric::resetMainVolumeOnStartPlay(bool b) { resetMainVolumeOnStartPlayFlag = b; if (player) 
player->resetMainVolumeOnStartPlay(b); } struct PeakAutoAdjustFilter : public Mixable { mp_uint32 mixerShift; mp_uint32 masterVolume; mp_sint32 lastPeakValue; PeakAutoAdjustFilter() : mixerShift(0), masterVolume(256), lastPeakValue(0) { } virtual void mix(mp_sint32* buffer, mp_uint32 bufferSize) { const mp_sint32* buffer32 = buffer; for (mp_uint32 i = 0; i < bufferSize*MP_NUMCHANNELS; i++) { mp_sint32 b = *buffer32++; if (abs(b) > lastPeakValue) lastPeakValue = abs(b); } } void calculateMasterVolume() { if (lastPeakValue) { float v = 32768.0f*(1<<mixerShift) / (float)lastPeakValue; masterVolume = (mp_sint32)((float)masterVolume*v); if (masterVolume > 256) masterVolume = 256; } } }; // export to 16bit stereo WAV mp_sint32 PlayerGeneric::exportToWAV(const SYSCHAR* fileName, XModule* module, mp_sint32 startOrder/* = 0*/, mp_sint32 endOrder/* = -1*/, const mp_ubyte* mutingArray/* = NULL*/, mp_uint32 mutingNumChannels/* = 0*/, const mp_ubyte* customPanningTable/* = NULL*/, AudioDriverBase* preferredDriver/* = NULL*/, mp_sint32* timingLUT/* = NULL*/) { PlayerBase* player = NULL; AudioDriverBase* wavWriter = preferredDriver; bool isWAVWriterDriver = false; if (wavWriter == NULL) { wavWriter = new WAVWriter(fileName); isWAVWriterDriver = true; if (!static_cast<WAVWriter*>(wavWriter)->isOpen()) { delete wavWriter; return MP_DEVICE_ERROR; } } MasterMixer mixer(frequency, bufferSize, 1, wavWriter); mixer.setSampleShift(sampleShift); mixer.setDisableMixing(disableMixing); player = getPreferredPlayer(module); PeakAutoAdjustFilter filter; if (autoAdjustPeak) mixer.setFilterHook(&filter); if (player) { player->adjustFrequency(frequency); player->resetOnStop(resetOnStopFlag); player->setBufferSize(bufferSize); player->setResamplerType(resamplerType); player->setMasterVolume(masterVolume); player->setPlayMode(playMode); player->setDisableMixing(disableMixing); player->setAllowFilters(allowFilters); #ifndef MILKYTRACKER if (player->getType() == PlayerBase::PlayerType_IT) { 
static_cast<PlayerIT*>(player)->setNumMaxVirChannels(numMaxVirChannels); } #endif mixer.addDevice(player); } if (player) { if (mutingArray && mutingNumChannels > 0 && mutingNumChannels <= module->header.channum) { for (mp_uint32 i = 0; i < mutingNumChannels; i++) player->muteChannel(i, mutingArray[i] == 1); } player->startPlaying(module, false, startOrder, 0, -1, customPanningTable, false, -1); mixer.start(); } if (endOrder == -1 || endOrder < startOrder || endOrder > module->header.ordnum - 1) endOrder = module->header.ordnum - 1; mp_sint32 curOrderPos = startOrder; if (timingLUT) { for (mp_sint32 i = 0; i < module->header.ordnum; i++) timingLUT[i] = -1; timingLUT[curOrderPos] = 0; } while (!player->hasSongHalted() && player->getOrder(0) <= endOrder) { wavWriter->advance(); if (player->getOrder(0) != curOrderPos) { #ifdef __VERBOSE__ printf("%f\n", (float)wavWriter->getNumPlayedSamples() / (float)getMixFrequency()); #endif curOrderPos = player->getOrder(0); if (timingLUT && curOrderPos < module->header.ordnum && timingLUT[curOrderPos] == -1) timingLUT[curOrderPos] = wavWriter->getNumPlayedSamples(); } } player->stopPlaying(); mixer.stop(); // important step, otherwise destruction of the audio driver will cause // trouble if the mixer instance is removed from this function's stack // and trys to access the driver which is no longer existant mixer.closeAudioDevice(); // Sync value sampleShift = mixer.getSampleShift(); filter.mixerShift = sampleShift; filter.calculateMasterVolume(); masterVolume = filter.masterVolume; delete player; mp_sint32 numWrittenSamples = wavWriter->getNumPlayedSamples(); if (isWAVWriterDriver) delete wavWriter; return numWrittenSamples; } bool PlayerGeneric::grabChannelInfo(mp_sint32 chn, TPlayerChannelInfo& channelInfo) const { if (player) return player->grabChannelInfo(chn, channelInfo); return false; } void PlayerGeneric::setNumMaxVirChannels(mp_sint32 max) { numMaxVirChannels = max; #ifndef MILKYTRACKER if (player) { if (player->getType() 
== PlayerBase::PlayerType_IT) { static_cast<PlayerIT*>(player)->setNumMaxVirChannels(max); } } #endif } mp_sint32 PlayerGeneric::getNumMaxVirChannels() const { #ifndef MILKYTRACKER if (player) { if (player->getType() == PlayerBase::PlayerType_IT) { return static_cast<PlayerIT*>(player)->getNumMaxVirChannels(); } } #endif return numMaxVirChannels; } // milkytracker void PlayerGeneric::setPanning(mp_ubyte chn, mp_ubyte pan) { if (player) player->setPanning(chn, pan); }
./CrossVul/dataset_final_sorted/CWE-416/cpp/good_4222_0
crossvul-cpp_data_bad_856_1
/* * Copyright (c) 2017, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include <folly/portability/GTest.h> #include <memory> #include <proxygen/lib/http/codec/compress/HeaderTable.h> #include <proxygen/lib/http/codec/compress/Logging.h> #include <sstream> using namespace std; using namespace testing; namespace proxygen { class HeaderTableTests : public testing::Test { protected: void xcheck(uint32_t internal, uint32_t external) { EXPECT_EQ(HeaderTable::toExternal(head_, length_, internal), external); EXPECT_EQ(HeaderTable::toInternal(head_, length_, external), internal); } uint32_t head_{0}; uint32_t length_{0}; }; TEST_F(HeaderTableTests, index_translation) { // simple cases length_ = 10; head_ = 5; xcheck(0, 6); xcheck(3, 3); xcheck(5, 1); // wrap head_ = 1; xcheck(0, 2); xcheck(8, 4); xcheck(5, 7); } TEST_F(HeaderTableTests, add) { HeaderTable table(4096); table.add(HPACKHeader("accept-encoding", "gzip")); table.add(HPACKHeader("accept-encoding", "gzip")); table.add(HPACKHeader("accept-encoding", "gzip")); EXPECT_EQ(table.names().size(), 1); EXPECT_EQ(table.hasName("accept-encoding"), true); auto it = table.names().find("accept-encoding"); EXPECT_EQ(it->second.size(), 3); EXPECT_EQ(table.nameIndex("accept-encoding"), 1); } TEST_F(HeaderTableTests, evict) { HPACKHeader accept("accept-encoding", "gzip"); HPACKHeader accept2("accept-encoding", "----"); // same size, different header HPACKHeader accept3("accept-encoding", "third"); // size is larger with 1 byte uint32_t max = 10; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); // fill the table for (size_t i = 0; i < max; i++) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table.add(accept2), true); // evict the first one 
EXPECT_EQ(table[1], accept2); auto ilist = table.names().find("accept-encoding")->second; EXPECT_EQ(ilist.size(), max); // evict all the 'accept' headers for (size_t i = 0; i < max - 1; i++) { EXPECT_EQ(table.add(accept2), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table[max], accept2); EXPECT_EQ(table.names().size(), 1); // add an entry that will cause 2 evictions EXPECT_EQ(table.add(accept3), true); EXPECT_EQ(table[1], accept3); EXPECT_EQ(table.size(), max - 1); // add a super huge header string bigvalue; bigvalue.append(capacity, 'x'); HPACKHeader bigheader("user-agent", bigvalue); EXPECT_EQ(table.add(bigheader), false); EXPECT_EQ(table.size(), 0); EXPECT_EQ(table.names().size(), 0); } TEST_F(HeaderTableTests, set_capacity) { HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 10; uint32_t capacity = accept.bytes() * max; HeaderTable table(capacity); // fill the table for (size_t i = 0; i < max; i++) { EXPECT_EQ(table.add(accept), true); } // change capacity table.setCapacity(capacity / 2); EXPECT_EQ(table.size(), max / 2); EXPECT_EQ(table.bytes(), capacity / 2); } TEST_F(HeaderTableTests, comparison) { uint32_t capacity = 128; HeaderTable t1(capacity); HeaderTable t2(capacity); HPACKHeader h1("Content-Encoding", "gzip"); HPACKHeader h2("Content-Encoding", "deflate"); // different in number of elements t1.add(h1); EXPECT_FALSE(t1 == t2); // different in size (bytes) t2.add(h2); EXPECT_FALSE(t1 == t2); // make them the same t1.add(h2); t2.add(h1); EXPECT_TRUE(t1 == t2); // make them mismatch on refset t1.addReference(1); EXPECT_FALSE(t1 == t2); } TEST_F(HeaderTableTests, print) { stringstream out; HeaderTable t(128); t.add(HPACKHeader("Accept-Encoding", "gzip")); t.addReference(1); out << t; EXPECT_EQ(out.str(), "\n[1] (s=51) Accept-Encoding: gzip\nreference set: [1, ]\ntotal size: 51\n"); } TEST_F(HeaderTableTests, increaseCapacity) { HPACKHeader accept("accept-encoding", "gzip"); uint32_t max = 4; uint32_t capacity = accept.bytes() * max; 
HeaderTable table(capacity); EXPECT_GT(table.length(), max); // fill the table for (size_t i = 0; i < table.length() + 1; i++) { EXPECT_EQ(table.add(accept), true); } EXPECT_EQ(table.size(), max); EXPECT_EQ(table.getIndex(accept), 4); // head should be 0, tail should be 2 max = 8; table.setCapacity(accept.bytes() * max); EXPECT_GT(table.length(), max); // external index didn't change EXPECT_EQ(table.getIndex(accept), 4); } }
./CrossVul/dataset_final_sorted/CWE-416/cpp/bad_856_1
crossvul-cpp_data_bad_2968_0
// This may look like C code, but it is really -*- C++ -*- // // Copyright Bob Friesenhahn, 1999, 2000, 2001, 2002, 2003 // Copyright Dirk Lemstra 2013-2017 // // Implementation of Image // #define MAGICKCORE_IMPLEMENTATION 1 #define MAGICK_PLUSPLUS_IMPLEMENTATION 1 #include "Magick++/Include.h" #include <cstdlib> #include <string> #include <string.h> #include <errno.h> #include <math.h> using namespace std; #include "Magick++/Image.h" #include "Magick++/Functions.h" #include "Magick++/Pixels.h" #include "Magick++/Options.h" #include "Magick++/ImageRef.h" #define AbsoluteValue(x) ((x) < 0 ? -(x) : (x)) #define MagickPI 3.14159265358979323846264338327950288419716939937510 #define DegreesToRadians(x) (MagickPI*(x)/180.0) #define ThrowImageException ThrowPPException(quiet()) MagickPPExport const char *Magick::borderGeometryDefault="6x6+0+0"; MagickPPExport const char *Magick::frameGeometryDefault="25x25+6+6"; MagickPPExport const char *Magick::raiseGeometryDefault="6x6+0+0"; MagickPPExport int Magick::operator == (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels and signature are the same, then the image is identical return((left_.rows() == right_.rows()) && (left_.columns() == right_.columns()) && (left_.signature() == right_.signature())); } MagickPPExport int Magick::operator != (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ == right_)); } MagickPPExport int Magick::operator > (const Magick::Image &left_, const Magick::Image &right_) { return(!(left_ < right_) && (left_ != right_)); } MagickPPExport int Magick::operator < (const Magick::Image &left_, const Magick::Image &right_) { // If image pixels are less, then image is smaller return((left_.rows() * left_.columns()) < (right_.rows() * right_.columns())); } MagickPPExport int Magick::operator >= (const Magick::Image &left_, const Magick::Image &right_) { return((left_ > right_) || (left_ == right_)); } MagickPPExport int Magick::operator <= (const 
Magick::Image &left_, const Magick::Image &right_) { return((left_ < right_) || ( left_ == right_)); } Magick::Image::Image(void) : _imgRef(new ImageRef) { } Magick::Image::Image(const Blob &blob_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(blob_); quiet(false); } catch (const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_, size_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,depth_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Blob &blob_,const Geometry &size_, const std::string &magick_) : _imgRef(new ImageRef) { try { // Read from Blob quiet(true); read(blob_,size_,magick_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Geometry &size_,const Color &color_) : _imgRef(new ImageRef) { // xc: prefix specifies an X11 color string std::string imageSpec("xc:"); imageSpec+=color_; try { quiet(true); // Set image size size(size_); // Initialize, Allocate and Read images read(imageSpec); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const Image &image_) : _imgRef(image_._imgRef) { _imgRef->increase(); } Magick::Image::Image(const Image &image_,const Geometry &geometry_) : _imgRef(new ImageRef) { const 
RectangleInfo geometry=geometry_; OffsetInfo offset; MagickCore::Image *image; GetPPException; image=CloneImage(image_.constImage(),geometry_.width(),geometry_.height(), MagickTrue,exceptionInfo); replaceImage(image); _imgRef->options(new Options(*image_.constOptions())); offset.x=0; offset.y=0; (void) CopyImagePixels(image,image_.constImage(),&geometry,&offset, exceptionInfo); ThrowImageException; } Magick::Image::Image(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) : _imgRef(new ImageRef) { try { quiet(true); read(width_,height_,map_.c_str(),type_,pixels_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::Image(const std::string &imageSpec_) : _imgRef(new ImageRef) { try { // Initialize, Allocate and Read images quiet(true); read(imageSpec_); quiet(false); } catch(const Error&) { // Release resources delete _imgRef; throw; } } Magick::Image::~Image() { try { if (_imgRef->decrease() == 0) delete _imgRef; } catch(Magick::Exception) { } _imgRef=(Magick::ImageRef *) NULL; } Magick::Image& Magick::Image::operator=(const Magick::Image &image_) { if (this != &image_) { image_._imgRef->increase(); if (_imgRef->decrease() == 0) delete _imgRef; // Use new image reference _imgRef=image_._imgRef; } return(*this); } void Magick::Image::adjoin(const bool flag_) { modifyImage(); options()->adjoin(flag_); } bool Magick::Image::adjoin(void) const { return(constOptions()->adjoin()); } void Magick::Image::alpha(const bool alphaFlag_) { modifyImage(); // If matte channel is requested, but image doesn't already have a // matte channel, then create an opaque matte channel. Likewise, if // the image already has a matte channel but a matte channel is not // desired, then set the matte channel to opaque. 
GetPPException; if ((alphaFlag_ && !constImage()->alpha_trait) || (constImage()->alpha_trait && !alphaFlag_)) SetImageAlpha(image(),OpaqueAlpha,exceptionInfo); ThrowImageException; image()->alpha_trait=alphaFlag_ ? BlendPixelTrait : UndefinedPixelTrait; } bool Magick::Image::alpha(void) const { if (constImage()->alpha_trait == BlendPixelTrait) return(true); else return(false); } void Magick::Image::matteColor(const Color &matteColor_) { modifyImage(); if (matteColor_.isValid()) { image()->matte_color=matteColor_; options()->matteColor(matteColor_); } else { // Set to default matte color Color tmpColor("#BDBDBD"); image()->matte_color=tmpColor; options()->matteColor(tmpColor); } } Magick::Color Magick::Image::matteColor(void) const { return(Color(constImage()->matte_color)); } void Magick::Image::animationDelay(const size_t delay_) { modifyImage(); image()->delay=delay_; } size_t Magick::Image::animationDelay(void) const { return(constImage()->delay); } void Magick::Image::animationIterations(const size_t iterations_) { modifyImage(); image()->iterations=iterations_; } size_t Magick::Image::animationIterations(void) const { return(constImage()->iterations); } void Magick::Image::attenuate(const double attenuate_) { char value[MagickPathExtent]; modifyImage(); FormatLocaleString(value,MagickPathExtent,"%.20g",attenuate_); (void) SetImageArtifact(image(),"attenuate",value); } void Magick::Image::backgroundColor(const Color &backgroundColor_) { modifyImage(); if (backgroundColor_.isValid()) image()->background_color=backgroundColor_; else image()->background_color=Color(); options()->backgroundColor(backgroundColor_); } Magick::Color Magick::Image::backgroundColor(void) const { return(constOptions()->backgroundColor()); } void Magick::Image::backgroundTexture(const std::string &backgroundTexture_) { modifyImage(); options()->backgroundTexture(backgroundTexture_); } std::string Magick::Image::backgroundTexture(void) const { return(constOptions()->backgroundTexture()); } 
size_t Magick::Image::baseColumns(void) const { return(constImage()->magick_columns); } std::string Magick::Image::baseFilename(void) const { return(std::string(constImage()->magick_filename)); } size_t Magick::Image::baseRows(void) const { return(constImage()->magick_rows); } void Magick::Image::blackPointCompensation(const bool flag_) { image()->black_point_compensation=(MagickBooleanType) flag_; } bool Magick::Image::blackPointCompensation(void) const { return(static_cast<bool>(constImage()->black_point_compensation)); } void Magick::Image::borderColor(const Color &borderColor_) { modifyImage(); if (borderColor_.isValid()) image()->border_color=borderColor_; else image()->border_color=Color(); options()->borderColor(borderColor_); } Magick::Color Magick::Image::borderColor(void) const { return(constOptions()->borderColor()); } Magick::Geometry Magick::Image::boundingBox(void) const { RectangleInfo bbox; GetPPException; bbox=GetImageBoundingBox(constImage(),exceptionInfo); ThrowImageException; return(Geometry(bbox)); } void Magick::Image::boxColor(const Color &boxColor_) { modifyImage(); options()->boxColor(boxColor_); } Magick::Color Magick::Image::boxColor(void) const { return(constOptions()->boxColor()); } void Magick::Image::channelDepth(const ChannelType channel_, const size_t depth_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); SetImageDepth(image(),depth_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } size_t Magick::Image::channelDepth(const ChannelType channel_) { size_t channel_depth; GetPPException; GetAndSetPPChannelMask(channel_); channel_depth=GetImageDepth(constImage(),exceptionInfo); RestorePPChannelMask; ThrowImageException; return(channel_depth); } size_t Magick::Image::channels() const { return(constImage()->number_channels); } void Magick::Image::classType(const ClassType class_) { if (classType() == PseudoClass && class_ == DirectClass) { // Use SyncImage to synchronize the DirectClass pixels with the // 
color map and then set to DirectClass type. modifyImage(); GetPPException; SyncImage(image(),exceptionInfo); ThrowImageException; image()->colormap=(PixelInfo *)RelinquishMagickMemory(image()->colormap); image()->storage_class=static_cast<MagickCore::ClassType>(DirectClass); return; } if (classType() == DirectClass && class_ == PseudoClass) { // Quantize to create PseudoClass color map modifyImage(); quantizeColors(MaxColormapSize); quantize(); image()->storage_class=static_cast<MagickCore::ClassType>(PseudoClass); } } Magick::ClassType Magick::Image::classType(void) const { return static_cast<Magick::ClassType>(constImage()->storage_class); } void Magick::Image::colorFuzz(const double fuzz_) { modifyImage(); image()->fuzz=fuzz_; options()->colorFuzz(fuzz_); } double Magick::Image::colorFuzz(void) const { return(constOptions()->colorFuzz()); } void Magick::Image::colorMapSize(const size_t entries_) { if (entries_ >MaxColormapSize) throwExceptionExplicit(MagickCore::OptionError, "Colormap entries must not exceed MaxColormapSize"); modifyImage(); GetPPException; (void) AcquireImageColormap(image(),entries_,exceptionInfo); ThrowImageException; } size_t Magick::Image::colorMapSize(void) const { if (!constImage()->colormap) throwExceptionExplicit(MagickCore::OptionError, "Image does not contain a colormap"); return(constImage()->colors); } void Magick::Image::colorSpace(const ColorspaceType colorSpace_) { if (image()->colorspace == colorSpace_) return; modifyImage(); GetPPException; TransformImageColorspace(image(),colorSpace_,exceptionInfo); ThrowImageException; } Magick::ColorspaceType Magick::Image::colorSpace(void) const { return (constImage()->colorspace); } void Magick::Image::colorSpaceType(const ColorspaceType colorSpace_) { modifyImage(); GetPPException; SetImageColorspace(image(),colorSpace_,exceptionInfo); ThrowImageException; options()->colorspaceType(colorSpace_); } Magick::ColorspaceType Magick::Image::colorSpaceType(void) const { 
return(constOptions()->colorspaceType()); } size_t Magick::Image::columns(void) const { return(constImage()->columns); } void Magick::Image::comment(const std::string &comment_) { modifyImage(); GetPPException; SetImageProperty(image(),"Comment",NULL,exceptionInfo); if (comment_.length() > 0) SetImageProperty(image(),"Comment",comment_.c_str(),exceptionInfo); ThrowImageException; } std::string Magick::Image::comment(void) const { const char *value; GetPPException; value=GetImageProperty(constImage(),"Comment",exceptionInfo); ThrowImageException; if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::compose(const CompositeOperator compose_) { image()->compose=compose_; } Magick::CompositeOperator Magick::Image::compose(void) const { return(constImage()->compose); } void Magick::Image::compressType(const CompressionType compressType_) { modifyImage(); image()->compression=compressType_; options()->compressType(compressType_); } Magick::CompressionType Magick::Image::compressType(void) const { return(constImage()->compression); } void Magick::Image::debug(const bool flag_) { modifyImage(); options()->debug(flag_); } bool Magick::Image::debug(void) const { return(constOptions()->debug()); } void Magick::Image::density(const Point &density_) { modifyImage(); options()->density(density_); if (density_.isValid()) { image()->resolution.x=density_.x(); if (density_.y() != 0.0) image()->resolution.y=density_.y(); else image()->resolution.y=density_.x(); } else { // Reset to default image()->resolution.x=0.0; image()->resolution.y=0.0; } } Magick::Point Magick::Image::density(void) const { if (isValid()) { ssize_t x_resolution=72, y_resolution=72; if (constImage()->resolution.x > 0.0) x_resolution=constImage()->resolution.x; if (constImage()->resolution.y > 0.0) y_resolution=constImage()->resolution.y; return(Point(x_resolution,y_resolution)); } return(constOptions()->density()); } void Magick::Image::depth(const 
size_t depth_) { size_t depth = depth_; if (depth > MAGICKCORE_QUANTUM_DEPTH) depth=MAGICKCORE_QUANTUM_DEPTH; modifyImage(); image()->depth=depth; options()->depth(depth); } size_t Magick::Image::depth(void) const { return(constImage()->depth); } std::string Magick::Image::directory(void) const { if (constImage()->directory) return(std::string(constImage()->directory)); if (!quiet()) throwExceptionExplicit(MagickCore::CorruptImageWarning, "Image does not contain a directory"); return(std::string()); } void Magick::Image::endian(const Magick::EndianType endian_) { modifyImage(); options()->endian(endian_); image()->endian=endian_; } Magick::EndianType Magick::Image::endian(void) const { return(constImage()->endian); } void Magick::Image::exifProfile(const Magick::Blob &exifProfile_) { modifyImage(); if (exifProfile_.data() != 0) { StringInfo *exif_profile; exif_profile=AcquireStringInfo(exifProfile_.length()); SetStringInfoDatum(exif_profile,(unsigned char *) exifProfile_.data()); GetPPException; (void) SetImageProfile(image(),"exif",exif_profile,exceptionInfo); exif_profile=DestroyStringInfo(exif_profile); ThrowImageException; } } Magick::Blob Magick::Image::exifProfile(void) const { const StringInfo *exif_profile; exif_profile=GetImageProfile(constImage(),"exif"); if (exif_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(exif_profile), GetStringInfoLength(exif_profile))); } void Magick::Image::fileName(const std::string &fileName_) { modifyImage(); fileName_.copy(image()->filename,sizeof(image()->filename)-1); image()->filename[fileName_.length()]=0; // Null terminate options()->fileName(fileName_); } std::string Magick::Image::fileName(void) const { return(constOptions()->fileName()); } MagickCore::MagickSizeType Magick::Image::fileSize(void) const { return(GetBlobSize(constImage())); } void Magick::Image::fillColor(const Magick::Color &fillColor_) { modifyImage(); options()->fillColor(fillColor_); } Magick::Color 
Magick::Image::fillColor(void) const { return(constOptions()->fillColor()); } void Magick::Image::fillRule(const Magick::FillRule &fillRule_) { modifyImage(); options()->fillRule(fillRule_); } Magick::FillRule Magick::Image::fillRule(void) const { return constOptions()->fillRule(); } void Magick::Image::fillPattern(const Image &fillPattern_) { modifyImage(); if (fillPattern_.isValid()) options()->fillPattern(fillPattern_.constImage()); else options()->fillPattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::fillPattern(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->fillPattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void Magick::Image::filterType(const Magick::FilterType filterType_) { modifyImage(); image()->filter=filterType_; } Magick::FilterType Magick::Image::filterType(void) const { return(constImage()->filter); } void Magick::Image::font(const std::string &font_) { modifyImage(); options()->font(font_); } std::string Magick::Image::font(void) const { return(constOptions()->font()); } void Magick::Image::fontFamily(const std::string &family_) { modifyImage(); options()->fontFamily(family_); } std::string Magick::Image::fontFamily(void) const { return(constOptions()->fontFamily()); } void Magick::Image::fontPointsize(const double pointSize_) { modifyImage(); options()->fontPointsize(pointSize_); } double Magick::Image::fontPointsize(void) const { return(constOptions()->fontPointsize()); } void Magick::Image::fontStyle(const StyleType pointSize_) { modifyImage(); options()->fontStyle(pointSize_); } Magick::StyleType Magick::Image::fontStyle(void) const { return(constOptions()->fontStyle()); } void Magick::Image::fontWeight(const size_t weight_) { modifyImage(); options()->fontWeight(weight_); } 
size_t Magick::Image::fontWeight(void) const { return(constOptions()->fontWeight()); } std::string Magick::Image::format(void) const { const MagickInfo *magick_info; GetPPException; magick_info=GetMagickInfo(constImage()->magick,exceptionInfo); ThrowImageException; if ((magick_info != 0) && (*magick_info->description != '\0')) return(std::string(magick_info->description)); if (!quiet()) throwExceptionExplicit(MagickCore::CorruptImageWarning, "Unrecognized image magick type"); return(std::string()); } std::string Magick::Image::formatExpression(const std::string expression) { char *text; std::string text_string; GetPPException; modifyImage(); text=InterpretImageProperties(imageInfo(),image(),expression.c_str(), exceptionInfo); if (text != (char *) NULL) { text_string=std::string(text); text=DestroyString(text); } ThrowImageException; return(text_string); } double Magick::Image::gamma(void) const { return(constImage()->gamma); } Magick::Geometry Magick::Image::geometry(void) const { if (constImage()->geometry) return Geometry(constImage()->geometry); if (!quiet()) throwExceptionExplicit(MagickCore::OptionWarning, "Image does not contain a geometry"); return(Geometry()); } void Magick::Image::gifDisposeMethod( const MagickCore::DisposeType disposeMethod_) { modifyImage(); image()->dispose=disposeMethod_; } MagickCore::DisposeType Magick::Image::gifDisposeMethod(void) const { return(constImage()->dispose); } bool Magick::Image::hasChannel(const PixelChannel channel) const { if (GetPixelChannelTraits(constImage(),channel) == UndefinedPixelTrait) return(false); if (channel == GreenPixelChannel || channel == BluePixelChannel) return (GetPixelChannelOffset(constImage(),channel) == (ssize_t)channel); return(true); } void Magick::Image::highlightColor(const Color color_) { std::string value; value=color_; artifact("compare:highlight-color",value); } void Magick::Image::iccColorProfile(const Magick::Blob &colorProfile_) { profile("icc",colorProfile_); } Magick::Blob 
Magick::Image::iccColorProfile(void) const { const StringInfo *color_profile; color_profile=GetImageProfile(constImage(),"icc"); if (color_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(color_profile),GetStringInfoLength( color_profile))); } void Magick::Image::interlaceType(const Magick::InterlaceType interlace_) { modifyImage(); image()->interlace=interlace_; options()->interlaceType(interlace_); } Magick::InterlaceType Magick::Image::interlaceType(void) const { return(constImage()->interlace); } void Magick::Image::interpolate(const PixelInterpolateMethod interpolate_) { modifyImage(); image()->interpolate=interpolate_; } Magick::PixelInterpolateMethod Magick::Image::interpolate(void) const { return constImage()->interpolate; } void Magick::Image::iptcProfile(const Magick::Blob &iptcProfile_) { modifyImage(); if (iptcProfile_.data() != 0) { StringInfo *iptc_profile; iptc_profile=AcquireStringInfo(iptcProfile_.length()); SetStringInfoDatum(iptc_profile,(unsigned char *) iptcProfile_.data()); GetPPException; (void) SetImageProfile(image(),"iptc",iptc_profile,exceptionInfo); iptc_profile=DestroyStringInfo(iptc_profile); ThrowImageException; } } Magick::Blob Magick::Image::iptcProfile(void) const { const StringInfo *iptc_profile; iptc_profile=GetImageProfile(constImage(),"iptc"); if (iptc_profile == (StringInfo *) NULL) return(Blob()); return(Blob(GetStringInfoDatum(iptc_profile),GetStringInfoLength( iptc_profile))); } bool Magick::Image::isOpaque(void) const { MagickBooleanType result; GetPPException; result=IsImageOpaque(constImage(),exceptionInfo); ThrowImageException; return(result != MagickFalse ? true : false); } void Magick::Image::isValid(const bool isValid_) { if (!isValid_) { delete _imgRef; _imgRef=new ImageRef; } else if (!isValid()) { // Construct with single-pixel black image to make // image valid. This is an obvious hack. 
size(Geometry(1,1)); read("xc:black"); } } bool Magick::Image::isValid(void) const { return rows() && columns(); } void Magick::Image::label(const std::string &label_) { modifyImage(); GetPPException; (void) SetImageProperty(image(),"Label",NULL,exceptionInfo); if (label_.length() > 0) (void) SetImageProperty(image(),"Label",label_.c_str(),exceptionInfo); ThrowImageException; } std::string Magick::Image::label(void) const { const char *value; GetPPException; value=GetImageProperty(constImage(),"Label",exceptionInfo); ThrowImageException; if (value) return(std::string(value)); return(std::string()); } void Magick::Image::lowlightColor(const Color color_) { std::string value; value=color_; artifact("compare:lowlight-color",value); } void Magick::Image::magick(const std::string &magick_) { size_t length; modifyImage(); length=sizeof(image()->magick)-1; if (magick_.length() < length) length=magick_.length(); if (!magick_.empty()) magick_.copy(image()->magick,length); image()->magick[length]=0; options()->magick(magick_); } std::string Magick::Image::magick(void) const { if (*(constImage()->magick) != '\0') return(std::string(constImage()->magick)); return(constOptions()->magick()); } void Magick::Image::masklightColor(const Color color_) { std::string value; value=color_; artifact("compare:masklight-color",value); } double Magick::Image::meanErrorPerPixel(void) const { return(constImage()->error.mean_error_per_pixel); } void Magick::Image::modulusDepth(const size_t depth_) { modifyImage(); GetPPException; SetImageDepth(image(),depth_,exceptionInfo); ThrowImageException; options()->depth(depth_); } size_t Magick::Image::modulusDepth(void) const { size_t depth; GetPPException; depth=GetImageDepth(constImage(),exceptionInfo); ThrowImageException; return(depth); } void Magick::Image::monochrome(const bool monochromeFlag_) { modifyImage(); options()->monochrome(monochromeFlag_); } bool Magick::Image::monochrome(void) const { return(constOptions()->monochrome()); } 
Magick::Geometry Magick::Image::montageGeometry(void) const { if (constImage()->montage) return Magick::Geometry(constImage()->montage); if (!quiet()) throwExceptionExplicit(MagickCore::CorruptImageWarning, "Image does not contain a montage"); return(Magick::Geometry()); } double Magick::Image::normalizedMaxError(void) const { return(constImage()->error.normalized_maximum_error); } double Magick::Image::normalizedMeanError(void) const { return(constImage()->error.normalized_mean_error); } void Magick::Image::orientation(const Magick::OrientationType orientation_) { modifyImage(); image()->orientation=orientation_; } Magick::OrientationType Magick::Image::orientation(void) const { return(constImage()->orientation); } void Magick::Image::page(const Magick::Geometry &pageSize_) { modifyImage(); options()->page(pageSize_); image()->page=pageSize_; } Magick::Geometry Magick::Image::page(void) const { return(Geometry(constImage()->page.width,constImage()->page.height, constImage()->page.x,constImage()->page.y)); } void Magick::Image::quality(const size_t quality_) { modifyImage(); image()->quality=quality_; options()->quality(quality_); } size_t Magick::Image::quality(void) const { return(constImage()->quality); } void Magick::Image::quantizeColors(const size_t colors_) { modifyImage(); options()->quantizeColors(colors_); } size_t Magick::Image::quantizeColors(void) const { return(constOptions()->quantizeColors()); } void Magick::Image::quantizeColorSpace( const Magick::ColorspaceType colorSpace_) { modifyImage(); options()->quantizeColorSpace(colorSpace_); } Magick::ColorspaceType Magick::Image::quantizeColorSpace(void) const { return(constOptions()->quantizeColorSpace()); } void Magick::Image::quantizeDither(const bool ditherFlag_) { modifyImage(); options()->quantizeDither(ditherFlag_); } bool Magick::Image::quantizeDither(void) const { return(constOptions()->quantizeDither()); } void Magick::Image::quantizeDitherMethod(const DitherMethod ditherMethod_) { 
modifyImage(); options()->quantizeDitherMethod(ditherMethod_); } MagickCore::DitherMethod Magick::Image::quantizeDitherMethod(void) const { return(constOptions()->quantizeDitherMethod()); } void Magick::Image::quantizeTreeDepth(const size_t treeDepth_) { modifyImage(); options()->quantizeTreeDepth(treeDepth_); } size_t Magick::Image::quantizeTreeDepth() const { return(constOptions()->quantizeTreeDepth()); } void Magick::Image::quiet(const bool quiet_) { modifyImage(); options()->quiet(quiet_); } bool Magick::Image::quiet(void) const { return(constOptions()->quiet()); } void Magick::Image::renderingIntent( const Magick::RenderingIntent renderingIntent_) { modifyImage(); image()->rendering_intent=renderingIntent_; } Magick::RenderingIntent Magick::Image::renderingIntent(void) const { return(static_cast<Magick::RenderingIntent>(constImage()->rendering_intent)); } void Magick::Image::resolutionUnits( const Magick::ResolutionType resolutionUnits_) { modifyImage(); image()->units=resolutionUnits_; options()->resolutionUnits(resolutionUnits_); } Magick::ResolutionType Magick::Image::resolutionUnits(void) const { return(static_cast<Magick::ResolutionType>(constImage()->units)); } size_t Magick::Image::rows(void) const { return(constImage()->rows); } void Magick::Image::scene(const size_t scene_) { modifyImage(); image()->scene=scene_; } size_t Magick::Image::scene(void) const { return(constImage()->scene); } void Magick::Image::size(const Geometry &geometry_) { modifyImage(); options()->size(geometry_); image()->rows=geometry_.height(); image()->columns=geometry_.width(); } Magick::Geometry Magick::Image::size(void) const { return(Magick::Geometry(constImage()->columns,constImage()->rows)); } void Magick::Image::strokeAntiAlias(const bool flag_) { modifyImage(); options()->strokeAntiAlias(flag_); } bool Magick::Image::strokeAntiAlias(void) const { return(constOptions()->strokeAntiAlias()); } void Magick::Image::strokeColor(const Magick::Color &strokeColor_) { std::string 
value; modifyImage(); options()->strokeColor(strokeColor_); value=strokeColor_; artifact("stroke",value); } Magick::Color Magick::Image::strokeColor(void) const { return(constOptions()->strokeColor()); } void Magick::Image::strokeDashArray(const double *strokeDashArray_) { modifyImage(); options()->strokeDashArray(strokeDashArray_); } const double* Magick::Image::strokeDashArray(void) const { return(constOptions()->strokeDashArray()); } void Magick::Image::strokeDashOffset(const double strokeDashOffset_) { modifyImage(); options()->strokeDashOffset(strokeDashOffset_); } double Magick::Image::strokeDashOffset(void) const { return(constOptions()->strokeDashOffset()); } void Magick::Image::strokeLineCap(const Magick::LineCap lineCap_) { modifyImage(); options()->strokeLineCap(lineCap_); } Magick::LineCap Magick::Image::strokeLineCap(void) const { return(constOptions()->strokeLineCap()); } void Magick::Image::strokeLineJoin(const Magick::LineJoin lineJoin_) { modifyImage(); options()->strokeLineJoin(lineJoin_); } Magick::LineJoin Magick::Image::strokeLineJoin(void) const { return(constOptions()->strokeLineJoin()); } void Magick::Image::strokeMiterLimit(const size_t strokeMiterLimit_) { modifyImage(); options()->strokeMiterLimit(strokeMiterLimit_); } size_t Magick::Image::strokeMiterLimit(void) const { return(constOptions()->strokeMiterLimit()); } void Magick::Image::strokePattern(const Image &strokePattern_) { modifyImage(); if(strokePattern_.isValid()) options()->strokePattern(strokePattern_.constImage()); else options()->strokePattern(static_cast<MagickCore::Image*>(NULL)); } Magick::Image Magick::Image::strokePattern(void) const { // FIXME: This is inordinately innefficient const MagickCore::Image *tmpTexture; Image texture; tmpTexture=constOptions()->strokePattern(); if (tmpTexture) { MagickCore::Image *image; GetPPException; image=CloneImage(tmpTexture,0,0,MagickTrue,exceptionInfo); texture.replaceImage(image); ThrowImageException; } return(texture); } void 
Magick::Image::strokeWidth(const double strokeWidth_) { char value[MagickPathExtent]; modifyImage(); options()->strokeWidth(strokeWidth_); FormatLocaleString(value,MagickPathExtent,"%.20g",strokeWidth_); (void) SetImageArtifact(image(),"strokewidth",value); } double Magick::Image::strokeWidth(void) const { return(constOptions()->strokeWidth()); } void Magick::Image::subImage(const size_t subImage_) { modifyImage(); options()->subImage(subImage_); } size_t Magick::Image::subImage(void) const { return(constOptions()->subImage()); } void Magick::Image::subRange(const size_t subRange_) { modifyImage(); options()->subRange(subRange_); } size_t Magick::Image::subRange(void) const { return(constOptions()->subRange()); } void Magick::Image::textAntiAlias(const bool flag_) { modifyImage(); options()->textAntiAlias(flag_); } bool Magick::Image::textAntiAlias(void) const { return(constOptions()->textAntiAlias()); } void Magick::Image::textDirection(DirectionType direction_) { modifyImage(); options()->textDirection(direction_); } Magick::DirectionType Magick::Image::textDirection(void) const { return(constOptions()->textDirection()); } void Magick::Image::textEncoding(const std::string &encoding_) { modifyImage(); options()->textEncoding(encoding_); } std::string Magick::Image::textEncoding(void) const { return(constOptions()->textEncoding()); } void Magick::Image::textGravity(GravityType gravity_) { modifyImage(); options()->textGravity(gravity_); } Magick::GravityType Magick::Image::textGravity(void) const { return(constOptions()->textGravity()); } void Magick::Image::textInterlineSpacing(double spacing_) { modifyImage(); options()->textInterlineSpacing(spacing_); } double Magick::Image::textInterlineSpacing(void) const { return(constOptions()->textInterlineSpacing()); } void Magick::Image::textInterwordSpacing(double spacing_) { modifyImage(); options()->textInterwordSpacing(spacing_); } double Magick::Image::textInterwordSpacing(void) const { 
return(constOptions()->textInterwordSpacing()); } void Magick::Image::textKerning(double kerning_) { modifyImage(); options()->textKerning(kerning_); } double Magick::Image::textKerning(void) const { return(constOptions()->textKerning()); } void Magick::Image::textUnderColor(const Color &underColor_) { modifyImage(); options()->textUnderColor(underColor_); } Magick::Color Magick::Image::textUnderColor(void) const { return(constOptions()->textUnderColor()); } size_t Magick::Image::totalColors(void) const { size_t colors; GetPPException; colors=GetNumberColors(constImage(),(FILE *) NULL,exceptionInfo); ThrowImageException; return colors; } void Magick::Image::transformRotation(const double angle_) { modifyImage(); options()->transformRotation(angle_); } void Magick::Image::transformSkewX(const double skewx_) { modifyImage(); options()->transformSkewX(skewx_); } void Magick::Image::transformSkewY(const double skewy_) { modifyImage(); options()->transformSkewY(skewy_); } Magick::ImageType Magick::Image::type(void) const { if (constOptions()->type() != UndefinedType) return(constOptions()->type()); return(GetImageType(constImage())); } void Magick::Image::type(const Magick::ImageType type_) { modifyImage(); options()->type(type_); GetPPException; SetImageType(image(),type_,exceptionInfo); ThrowImageException; } void Magick::Image::verbose(const bool verboseFlag_) { modifyImage(); options()->verbose(verboseFlag_); } bool Magick::Image::verbose(void) const { return(constOptions()->verbose()); } void Magick::Image::virtualPixelMethod( const VirtualPixelMethod virtualPixelMethod_) { modifyImage(); GetPPException; SetImageVirtualPixelMethod(image(),virtualPixelMethod_,exceptionInfo); ThrowImageException; } Magick::VirtualPixelMethod Magick::Image::virtualPixelMethod(void) const { return(GetImageVirtualPixelMethod(constImage())); } void Magick::Image::x11Display(const std::string &display_) { modifyImage(); options()->x11Display(display_); } std::string 
Magick::Image::x11Display(void) const { return(constOptions()->x11Display()); } double Magick::Image::xResolution(void) const { return(constImage()->resolution.x); } double Magick::Image::yResolution(void) const { return(constImage()->resolution.y); } void Magick::Image::adaptiveBlur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveBlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveResize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=AdaptiveResizeImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveSharpenChannel(const ChannelType channel_, const double radius_,const double sigma_ ) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=AdaptiveSharpenImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::adaptiveThreshold(const size_t width_,const size_t height_, const double bias_) { MagickCore::Image *newImage; GetPPException; newImage=AdaptiveThresholdImage(constImage(),width_,height_,bias_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoise(const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; newImage=AddNoiseImage(constImage(),noiseType_,1.0,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::addNoiseChannel(const ChannelType 
channel_, const NoiseType noiseType_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=AddNoiseImage(constImage(),noiseType_,1.0,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::affineTransform(const DrawableAffine &affine_) { AffineMatrix _affine; MagickCore::Image *newImage; _affine.sx=affine_.sx(); _affine.sy=affine_.sy(); _affine.rx=affine_.rx(); _affine.ry=affine_.ry(); _affine.tx=affine_.tx(); _affine.ty=affine_.ty(); GetPPException; newImage=AffineTransformImage(constImage(),&_affine,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::alpha(const unsigned int alpha_) { modifyImage(); GetPPException; SetImageAlpha(image(),alpha_,exceptionInfo); ThrowImageException; } void Magick::Image::alphaChannel(AlphaChannelOption alphaOption_) { modifyImage(); GetPPException; SetImageAlphaChannel(image(),alphaOption_,exceptionInfo); ThrowImageException; } void Magick::Image::annotate(const std::string &text_, const Geometry &location_) { annotate(text_,location_,NorthWestGravity,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_) { annotate(text_,boundingArea_,gravity_,0.0); } void Magick::Image::annotate(const std::string &text_, const Geometry &boundingArea_,const GravityType gravity_, const double degrees_) { AffineMatrix oaffine; char boundingArea[MagickPathExtent]; DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->geometry=DestroyString(drawInfo->geometry); if (boundingArea_.isValid()) { if (boundingArea_.width() == 0 || boundingArea_.height() == 0) { FormatLocaleString(boundingArea,MagickPathExtent,"%+.20g%+.20g", (double) boundingArea_.xOff(),(double) boundingArea_.yOff()); } else { (void) CopyMagickString(boundingArea, 
std::string(boundingArea_).c_str(), MagickPathExtent); } drawInfo->geometry=boundingArea; } drawInfo->gravity=gravity_; oaffine=drawInfo->affine; if (degrees_ != 0.0) { AffineMatrix affine, current; affine.sx=1.0; affine.rx=0.0; affine.ry=0.0; affine.sy=1.0; affine.tx=0.0; affine.ty=0.0; current=drawInfo->affine; affine.sx=cos(DegreesToRadians(fmod(degrees_,360.0))); affine.rx=sin(DegreesToRadians(fmod(degrees_,360.0))); affine.ry=(-sin(DegreesToRadians(fmod(degrees_,360.0)))); affine.sy=cos(DegreesToRadians(fmod(degrees_,360.0))); drawInfo->affine.sx=current.sx*affine.sx+current.ry*affine.rx; drawInfo->affine.rx=current.rx*affine.sx+current.sy*affine.rx; drawInfo->affine.ry=current.sx*affine.ry+current.ry*affine.sy; drawInfo->affine.sy=current.rx*affine.ry+current.sy*affine.sy; drawInfo->affine.tx=current.sx*affine.tx+current.ry*affine.ty +current.tx; } GetPPException; AnnotateImage(image(),drawInfo,exceptionInfo); // Restore original values drawInfo->affine=oaffine; drawInfo->text=(char *) NULL; drawInfo->geometry=(char *) NULL; ThrowImageException; } void Magick::Image::annotate(const std::string &text_, const GravityType gravity_) { DrawInfo *drawInfo; modifyImage(); drawInfo=options()->drawInfo(); drawInfo->text=DestroyString(drawInfo->text); drawInfo->text=const_cast<char *>(text_.c_str()); drawInfo->gravity=gravity_; GetPPException; AnnotateImage(image(),drawInfo,exceptionInfo); drawInfo->gravity=NorthWestGravity; drawInfo->text=(char *) NULL; ThrowImageException; } void Magick::Image::artifact(const std::string &name_,const std::string &value_) { modifyImage(); (void) SetImageArtifact(image(),name_.c_str(),value_.c_str()); } std::string Magick::Image::artifact(const std::string &name_) const { const char *value; value=GetImageArtifact(constImage(),name_.c_str()); if (value) return(std::string(value)); return(std::string()); } void Magick::Image::attribute(const std::string name_,const char *value_) { modifyImage(); GetPPException; 
SetImageProperty(image(),name_.c_str(),value_,exceptionInfo); ThrowImageException; } void Magick::Image::attribute(const std::string name_,const std::string value_) { modifyImage(); GetPPException; SetImageProperty(image(),name_.c_str(),value_.c_str(),exceptionInfo); ThrowImageException; } std::string Magick::Image::attribute(const std::string name_) const { const char *value; GetPPException; value=GetImageProperty(constImage(),name_.c_str(),exceptionInfo); ThrowImageException; if (value) return(std::string(value)); return(std::string()); // Intentionally no exception } void Magick::Image::autoGamma(void) { modifyImage(); GetPPException; (void) SyncImageSettings(imageInfo(),image(),exceptionInfo); (void) AutoGammaImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::autoGammaChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) SyncImageSettings(imageInfo(),image(),exceptionInfo); (void) AutoGammaImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::autoLevel(void) { modifyImage(); GetPPException; (void) AutoLevelImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::autoLevelChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) AutoLevelImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::autoOrient(void) { MagickCore::Image *newImage; if (image()->orientation == UndefinedOrientation || image()->orientation == TopLeftOrientation) return; GetPPException; newImage=AutoOrientImage(constImage(),image()->orientation,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::autoThreshold(const AutoThresholdMethod method_) { modifyImage(); GetPPException; AutoThresholdImage(image(),method_, exceptionInfo); ThrowImageException; } void Magick::Image::blackThreshold(const std::string &threshold_) { modifyImage(); 
GetPPException; BlackThresholdImage(image(),threshold_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::blackThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); BlackThresholdImage(image(),threshold_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::blueShift(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=BlueShiftImage(constImage(),factor_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::blurChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=BlurImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::border(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo borderInfo=geometry_; GetPPException; newImage=BorderImage(constImage(),&borderInfo,image()->compose, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::brightnessContrast(const double brightness_, const double contrast_) { modifyImage(); GetPPException; BrightnessContrastImage(image(),brightness_,contrast_,exceptionInfo); ThrowImageException; } void Magick::Image::brightnessContrastChannel(const ChannelType channel_, const double brightness_,const double contrast_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); BrightnessContrastImage(image(),brightness_,contrast_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::cannyEdge(const double radius_,const double sigma_, const double 
lowerPercent_,const double upperPercent_) { MagickCore::Image *newImage; modifyImage(); GetPPException; newImage=CannyEdgeImage(constImage(),radius_,sigma_,lowerPercent_, upperPercent_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::cdl(const std::string &cdl_) { modifyImage(); GetPPException; (void) ColorDecisionListImage(image(),cdl_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::channel(const ChannelType channel_) { MagickCore::Image *newImage; GetPPException; newImage=SeparateImage(image(),channel_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::charcoal(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=CharcoalImage(image(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::charcoalChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=CharcoalImage(image(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::chop(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo chopInfo=geometry_; GetPPException; newImage=ChopImage(image(),&chopInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::chromaBluePrimary(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.blue_primary.x=x_; image()->chromaticity.blue_primary.y=y_; image()->chromaticity.blue_primary.z=z_; } void Magick::Image::chromaBluePrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.blue_primary.x; *y_=constImage()->chromaticity.blue_primary.y; *z_=constImage()->chromaticity.blue_primary.z; } void Magick::Image::chromaGreenPrimary(const double x_,const double y_, const double z_) { modifyImage(); 
image()->chromaticity.green_primary.x=x_; image()->chromaticity.green_primary.y=y_; image()->chromaticity.green_primary.z=z_; } void Magick::Image::chromaGreenPrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.green_primary.x; *y_=constImage()->chromaticity.green_primary.y; *z_=constImage()->chromaticity.green_primary.z; } void Magick::Image::chromaRedPrimary(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.red_primary.x=x_; image()->chromaticity.red_primary.y=y_; image()->chromaticity.red_primary.z=z_; } void Magick::Image::chromaRedPrimary(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.red_primary.x; *y_=constImage()->chromaticity.red_primary.y; *z_=constImage()->chromaticity.red_primary.z; } void Magick::Image::chromaWhitePoint(const double x_,const double y_, const double z_) { modifyImage(); image()->chromaticity.white_point.x=x_; image()->chromaticity.white_point.y=y_; image()->chromaticity.white_point.z=z_; } void Magick::Image::chromaWhitePoint(double *x_,double *y_,double *z_) const { *x_=constImage()->chromaticity.white_point.x; *y_=constImage()->chromaticity.white_point.y; *z_=constImage()->chromaticity.white_point.z; } void Magick::Image::clamp(void) { modifyImage(); GetPPException; ClampImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::clampChannel(const ChannelType channel_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); ClampImage(image(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::clip(void) { modifyImage(); GetPPException; ClipImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::clipPath(const std::string pathname_,const bool inside_) { modifyImage(); GetPPException; ClipImagePath(image(),pathname_.c_str(),(MagickBooleanType) inside_, exceptionInfo); ThrowImageException; } void Magick::Image::clut(const Image &clutImage_, const PixelInterpolateMethod 
method)
{
  // Tail of clut(clutImage_,method) begun on the previous source line:
  // replace pixel colors using the color lookup-table image.
  modifyImage();
  GetPPException;
  ClutImage(image(),clutImage_.constImage(),method,exceptionInfo);
  ThrowImageException;
}

// As clut(), but restricted to the given channel(s).
void Magick::Image::clutChannel(const ChannelType channel_,
  const Image &clutImage_,const PixelInterpolateMethod method)
{
  modifyImage();
  GetPPException;
  GetAndSetPPChannelMask(channel_);
  ClutImage(image(),clutImage_.constImage(),method,exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
}

// Colorize with a single alpha applied equally to red, green and blue;
// delegates to the three-alpha overload below.
void Magick::Image::colorize(const unsigned int alpha_,const Color &penColor_)
{
  colorize(alpha_,alpha_,alpha_,penColor_);
}

// Blend penColor_ into the image, with per-channel blend percentages
// formatted as a "red/green/blue" geometry string for ColorizeImage().
void Magick::Image::colorize(const unsigned int alphaRed_,
  const unsigned int alphaGreen_,const unsigned int alphaBlue_,
  const Color &penColor_)
{
  char
    blend[MagickPathExtent];

  MagickCore::Image
    *newImage;

  PixelInfo
    target;

  if (!penColor_.isValid())
    throwExceptionExplicit(MagickCore::OptionError,
      "Pen color argument is invalid");

  FormatLocaleString(blend,MagickPathExtent,"%u/%u/%u",alphaRed_,alphaGreen_,
    alphaBlue_);
  target=static_cast<PixelInfo>(penColor_);
  GetPPException;
  newImage=ColorizeImage(image(),blend,&target,exceptionInfo);
  replaceImage(newImage);
  ThrowImageException;
}

// Assign color_ to colormap slot index_, growing the colormap if needed.
// Validates the index against MaxColormapSize and the color for validity.
void Magick::Image::colorMap(const size_t index_,const Color &color_)
{
  MagickCore::Image
    *imageptr;

  imageptr=image();
  if (index_ > (MaxColormapSize-1))
    throwExceptionExplicit(MagickCore::OptionError,
      "Colormap index must be less than MaxColormapSize");
  if (!color_.isValid())
    throwExceptionExplicit(MagickCore::OptionError,
      "Color argument is invalid");

  modifyImage();

  // Ensure that colormap size is large enough
  if (colorMapSize() < (index_+1))
    colorMapSize(index_+1);

  // Set color at index in colormap
  (imageptr->colormap)[index_]=color_;
}

// Head of the colorMap(index_) getter: validate that a colormap exists and
// that index_ is in range; the return continues on the next source line.
Magick::Color Magick::Image::colorMap(const size_t index_) const
{
  if (!constImage()->colormap)
    {
      throwExceptionExplicit(MagickCore::OptionError,
        "Image does not contain a colormap");
      return(Color());
    }

  if (index_ > constImage()->colors-1)
    throwExceptionExplicit(MagickCore::OptionError,"Index out of range");
// Tail of the colorMap(index_) getter begun on the previous source line.
return(Magick::Color((constImage()->colormap)[index_]));
}

// Apply an order_ x order_ color matrix to the image channels. The kernel
// is built by hand from color_matrix_; note the NULL checks on both the
// kernel allocation and its values buffer before use.
void Magick::Image::colorMatrix(const size_t order_,
  const double *color_matrix_)
{
  KernelInfo
    *kernel_info;

  GetPPException;
  kernel_info=AcquireKernelInfo((const char *) NULL,exceptionInfo);
  if (kernel_info != (KernelInfo *) NULL)
    {
      kernel_info->width=order_;
      kernel_info->height=order_;
      kernel_info->values=(MagickRealType *) AcquireAlignedMemory(order_,
        order_*sizeof(*kernel_info->values));
      if (kernel_info->values != (MagickRealType *) NULL)
        {
          MagickCore::Image
            *newImage;

          for (ssize_t i=0; i < (ssize_t) (order_*order_); i++)
            kernel_info->values[i]=color_matrix_[i];
          newImage=ColorMatrixImage(image(),kernel_info,exceptionInfo);
          replaceImage(newImage);
        }
      kernel_info=DestroyKernelInfo(kernel_info);
    }
  ThrowImageException;
}

// True when this image and reference_ contain identical pixels.
bool Magick::Image::compare(const Image &reference_) const
{
  bool
    status;

  Image
    ref=reference_;

  GetPPException;
  status=static_cast<bool>(IsImagesEqual(constImage(),ref.constImage(),
    exceptionInfo));
  ThrowImageException;
  return(status);
}

// Return the distortion between this image and reference_ under metric_.
double Magick::Image::compare(const Image &reference_,const MetricType metric_)
{
  double
    distortion=0.0;

  GetPPException;
  GetImageDistortion(image(),reference_.constImage(),metric_,&distortion,
    exceptionInfo);
  ThrowImageException;
  return(distortion);
}

// As compare(reference_,metric_), restricted to the given channel(s).
double Magick::Image::compareChannel(const ChannelType channel_,
  const Image &reference_,const MetricType metric_)
{
  double
    distortion=0.0;

  GetPPException;
  GetAndSetPPChannelMask(channel_);
  GetImageDistortion(image(),reference_.constImage(),metric_,&distortion,
    exceptionInfo);
  RestorePPChannelMask;
  ThrowImageException;
  return(distortion);
}

// Compare against reference_ and return a difference image; distortion is
// written through the out-parameter. Returns a default-constructed Image
// when CompareImages yields no result.
Magick::Image Magick::Image::compare(const Image &reference_,
  const MetricType metric_,double *distortion)
{
  MagickCore::Image
    *newImage;

  GetPPException;
  newImage=CompareImages(image(),reference_.constImage(),metric_,distortion,
    exceptionInfo);
  ThrowImageException;
  if (newImage == (MagickCore::Image *) NULL)
    return(Magick::Image());
  else
    return(Magick::Image(newImage));
}
Magick::Image Magick::Image::compareChannel(const ChannelType channel_, const Image &reference_,const MetricType metric_,double *distortion) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=CompareImages(image(),reference_.constImage(),metric_,distortion, exceptionInfo); RestorePPChannelMask; ThrowImageException; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::composite(const Image &compositeImage_, const Geometry &offset_,const CompositeOperator compose_) { size_t height=rows(), width=columns(); ssize_t x=offset_.xOff(), y=offset_.yOff(); ParseMetaGeometry(static_cast<std::string>(offset_).c_str(),&x,&y,&width, &height); modifyImage(); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, x,y,exceptionInfo); ThrowImageException; } void Magick::Image::composite(const Image &compositeImage_, const GravityType gravity_,const CompositeOperator compose_) { RectangleInfo geometry; modifyImage(); SetGeometry(compositeImage_.constImage(),&geometry); GravityAdjustGeometry(columns(),rows(),gravity_,&geometry); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, geometry.x,geometry.y,exceptionInfo); ThrowImageException; } void Magick::Image::composite(const Image &compositeImage_, const ssize_t xOffset_,const ssize_t yOffset_, const CompositeOperator compose_) { // Image supplied as compositeImage is composited with current image and // results in updating current image. 
modifyImage(); GetPPException; CompositeImage(image(),compositeImage_.constImage(),compose_,MagickTrue, xOffset_,yOffset_,exceptionInfo); ThrowImageException; } void Magick::Image::connectedComponents(const size_t connectivity_) { MagickCore::Image *newImage; GetPPException; newImage=ConnectedComponentsImage(constImage(),connectivity_, (CCObjectInfo **) NULL,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::contrast(const bool sharpen_) { modifyImage(); GetPPException; ContrastImage(image(),(MagickBooleanType) sharpen_,exceptionInfo); ThrowImageException; } void Magick::Image::contrastStretch(const double blackPoint_, const double whitePoint_) { modifyImage(); GetPPException; ContrastStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo); ThrowImageException; } void Magick::Image::contrastStretchChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); ContrastStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::convolve(const size_t order_,const double *kernel_) { KernelInfo *kernel_info; GetPPException; kernel_info=AcquireKernelInfo((const char *) NULL,exceptionInfo); kernel_info->width=order_; kernel_info->height=order_; kernel_info->x=(ssize_t) (order_-1)/2; kernel_info->y=(ssize_t) (order_-1)/2; kernel_info->values=(MagickRealType *) AcquireAlignedMemory(order_, order_*sizeof(*kernel_info->values)); if (kernel_info->values != (MagickRealType *) NULL) { MagickCore::Image *newImage; for (ssize_t i=0; i < (ssize_t) (order_*order_); i++) kernel_info->values[i]=kernel_[i]; newImage=ConvolveImage(image(),kernel_info,exceptionInfo); replaceImage(newImage); } kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException; } void Magick::Image::copyPixels(const Image &source_,const Geometry &geometry_, const Offset &offset_) { const OffsetInfo offset=offset_; const 
RectangleInfo geometry=geometry_; GetPPException; (void) CopyImagePixels(image(),source_.constImage(),&geometry,&offset, exceptionInfo); ThrowImageException; } void Magick::Image::crop(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo cropInfo=geometry_; GetPPException; newImage=CropImage(constImage(),&cropInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::cycleColormap(const ssize_t amount_) { modifyImage(); GetPPException; CycleColormapImage(image(),amount_,exceptionInfo); ThrowImageException; } void Magick::Image::decipher(const std::string &passphrase_) { modifyImage(); GetPPException; DecipherImage(image(),passphrase_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::defineSet(const std::string &magick_, const std::string &key_,bool flag_) { std::string definition; modifyImage(); definition=magick_ + ":" + key_; if (flag_) (void) SetImageOption(imageInfo(),definition.c_str(),""); else DeleteImageOption(imageInfo(),definition.c_str()); } bool Magick::Image::defineSet(const std::string &magick_, const std::string &key_ ) const { const char *option; std::string key; key=magick_ + ":" + key_; option=GetImageOption(constImageInfo(),key.c_str()); if (option) return(true); return(false); } void Magick::Image::defineValue(const std::string &magick_, const std::string &key_,const std::string &value_) { std::string format, option; modifyImage(); format=magick_ + ":" + key_; option=value_; (void) SetImageOption(imageInfo(),format.c_str(),option.c_str()); } std::string Magick::Image::defineValue(const std::string &magick_, const std::string &key_) const { const char *option; std::string definition; definition=magick_ + ":" + key_; option=GetImageOption(constImageInfo(),definition.c_str()); if (option) return(std::string(option)); return(std::string()); } void Magick::Image::deskew(const double threshold_) { MagickCore::Image *newImage; GetPPException; 
newImage=DeskewImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::despeckle(void) { MagickCore::Image *newImage; GetPPException; newImage=DespeckleImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::display(void) { GetPPException; DisplayImages(imageInfo(),image(),exceptionInfo); ThrowImageException; } void Magick::Image::distort(const DistortMethod method_, const size_t numberArguments_,const double *arguments_,const bool bestfit_) { MagickCore::Image *newImage; GetPPException; newImage=DistortImage(constImage(), method_,numberArguments_,arguments_, bestfit_ == true ? MagickTrue : MagickFalse,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::draw(const Magick::Drawable &drawable_) { DrawingWand *wand; modifyImage(); wand=AcquireDrawingWand(options()->drawInfo(),image()); if(wand) { drawable_.operator()(wand); DrawRender(wand); ClonePPDrawException(wand); wand=DestroyDrawingWand(wand); ThrowPPDrawException(quiet()); } } void Magick::Image::draw(const std::vector<Magick::Drawable> &drawable_) { DrawingWand *wand; modifyImage(); wand= AcquireDrawingWand(options()->drawInfo(),image()); if(wand) { for (std::vector<Magick::Drawable>::const_iterator p = drawable_.begin(); p != drawable_.end(); p++ ) { p->operator()(wand); if (DrawGetExceptionType(wand) != MagickCore::UndefinedException) break; } if (DrawGetExceptionType(wand) == MagickCore::UndefinedException) DrawRender(wand); ClonePPDrawException(wand); wand=DestroyDrawingWand(wand); ThrowPPDrawException(quiet()); } } void Magick::Image::edge(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=EdgeImage(constImage(),radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::emboss(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; 
newImage=EmbossImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::encipher(const std::string &passphrase_) { modifyImage(); GetPPException; EncipherImage(image(),passphrase_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::enhance(void) { MagickCore::Image *newImage; GetPPException; newImage=EnhanceImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::equalize(void) { modifyImage(); GetPPException; EqualizeImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::erase(void) { modifyImage(); GetPPException; (void) SetImageBackgroundColor(image(),exceptionInfo); ThrowImageException; } void Magick::Image::evaluate(const ChannelType channel_, const MagickEvaluateOperator operator_,double rvalue_) { GetPPException; GetAndSetPPChannelMask(channel_); EvaluateImage(image(),operator_,rvalue_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::evaluate(const ChannelType channel_, const MagickFunction function_,const size_t number_parameters_, const double *parameters_) { GetPPException; GetAndSetPPChannelMask(channel_); FunctionImage(image(),function_,number_parameters_,parameters_, exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::evaluate(const ChannelType channel_,const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_, const MagickEvaluateOperator operator_,const double rvalue_) { RectangleInfo geometry; MagickCore::Image *cropImage; geometry.width = columns_; geometry.height = rows_; geometry.x = x_; geometry.y = y_; GetPPException; cropImage=CropImage(image(),&geometry,exceptionInfo); GetAndSetPPChannelMask(channel_); EvaluateImage(cropImage,operator_,rvalue_,exceptionInfo); RestorePPChannelMask; (void) CompositeImage(image(),cropImage,image()->alpha_trait == BlendPixelTrait ? 
OverCompositeOp : CopyCompositeOp,MagickFalse, geometry.x,geometry.y,exceptionInfo ); cropImage=DestroyImageList(cropImage); ThrowImageException; } void Magick::Image::extent(const Geometry &geometry_ ) { MagickCore::Image *newImage; RectangleInfo extentInfo=geometry_; modifyImage(); extentInfo.x=geometry_.xOff(); extentInfo.y=geometry_.yOff(); GetPPException; newImage=ExtentImage(image(),&extentInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); extent(geometry_); } void Magick::Image::extent(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { backgroundColor(backgroundColor_); extent(geometry_,gravity_); } void Magick::Image::extent(const Geometry &geometry_, const GravityType gravity_) { RectangleInfo geometry; SetGeometry(image(),&geometry); geometry.width=geometry_.width(); geometry.height=geometry_.height(); GravityAdjustGeometry(image()->columns,image()->rows,gravity_,&geometry); extent(geometry); } void Magick::Image::flip(void) { MagickCore::Image *newImage; GetPPException; newImage=FlipImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::floodFillAlpha(const ssize_t x_,const ssize_t y_, const unsigned int alpha_,const bool invert_) { PixelInfo target; modifyImage(); target=static_cast<PixelInfo>(pixelColor(x_,y_)); target.alpha=alpha_; GetPPException; GetAndSetPPChannelMask(AlphaChannel); FloodfillPaintImage(image(),options()->drawInfo(),&target,x_,y_, (MagickBooleanType)invert_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::floodFillAlpha(const ssize_t x_,const ssize_t y_, const unsigned int alpha_,const Color &target_,const bool invert_) { PixelInfo target; modifyImage(); target=static_cast<PixelInfo>(target_); target.alpha=alpha_; GetPPException; GetAndSetPPChannelMask(AlphaChannel); 
FloodfillPaintImage(image(),options()->drawInfo(),&target,x_,y_, (MagickBooleanType)invert_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_,const bool invert_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,invert_); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const bool invert_) { PixelInfo pixel; modifyImage(); pixel=static_cast<PixelInfo>(pixelColor(x_,y_)); floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_); } void Magick::Image::floodFillColor(const Geometry &point_, const Magick::Color &fillColor_,const Magick::Color &borderColor_, const bool invert_) { floodFillColor(point_.xOff(),point_.yOff(),fillColor_,borderColor_,invert_); } void Magick::Image::floodFillColor(const ssize_t x_,const ssize_t y_, const Magick::Color &fillColor_,const Magick::Color &borderColor_, const bool invert_) { PixelInfo pixel; modifyImage(); pixel=static_cast<PixelInfo>(borderColor_); floodFill(x_,y_,(Magick::Image *)NULL,fillColor_,&pixel,invert_); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_,const bool invert_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,invert_); } void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const bool invert_) { PixelInfo pixel; modifyImage(); pixel=static_cast<PixelInfo>(pixelColor(x_,y_)); floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_); } void Magick::Image::floodFillTexture(const Magick::Geometry &point_, const Magick::Image &texture_,const Magick::Color &borderColor_, const bool invert_) { floodFillTexture(point_.xOff(),point_.yOff(),texture_,borderColor_,invert_); } void Magick::Image::floodFillTexture(const ssize_t x_,const ssize_t y_, const Magick::Image &texture_,const Magick::Color &borderColor_, const bool invert_) { PixelInfo pixel; 
modifyImage(); pixel=static_cast<PixelInfo>(borderColor_); floodFill(x_,y_,&texture_,Magick::Color(),&pixel,invert_); } void Magick::Image::flop(void) { MagickCore::Image *newImage; GetPPException; newImage=FlopImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fontTypeMetrics(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); GetPPException; GetTypeMetrics(image(),drawInfo,&(metrics->_typeMetric),exceptionInfo); drawInfo->text=0; ThrowImageException; } void Magick::Image::fontTypeMetricsMultiline(const std::string &text_, TypeMetric *metrics) { DrawInfo *drawInfo; drawInfo=options()->drawInfo(); drawInfo->text=const_cast<char *>(text_.c_str()); GetPPException; GetMultilineTypeMetrics(image(),drawInfo,&(metrics->_typeMetric),exceptionInfo); drawInfo->text=0; ThrowImageException; } void Magick::Image::frame(const Geometry &geometry_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(geometry_.width()); info.y=static_cast<ssize_t>(geometry_.height()); info.width=columns() + (static_cast<size_t>(info.x) << 1); info.height=rows() + (static_cast<size_t>(info.y) << 1); info.outer_bevel=geometry_.xOff(); info.inner_bevel=geometry_.yOff(); GetPPException; newImage=FrameImage(constImage(),&info,image()->compose,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::frame(const size_t width_,const size_t height_, const ssize_t innerBevel_,const ssize_t outerBevel_) { FrameInfo info; MagickCore::Image *newImage; info.x=static_cast<ssize_t>(width_); info.y=static_cast<ssize_t>(height_); info.width=columns() + (static_cast<size_t>(info.x) << 1); info.height=rows() + (static_cast<size_t>(info.y) << 1); info.outer_bevel=static_cast<ssize_t>(outerBevel_); info.inner_bevel=static_cast<ssize_t>(innerBevel_); GetPPException; 
newImage=FrameImage(constImage(),&info,image()->compose,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression_) { MagickCore::Image *newImage; GetPPException; newImage=FxImage(constImage(),expression_.c_str(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::fx(const std::string expression_, const Magick::ChannelType channel_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=FxImage(constImage(),expression_.c_str(),exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::gamma(const double gamma_) { modifyImage(); GetPPException; GammaImage(image(),gamma_,exceptionInfo); ThrowImageException; } void Magick::Image::gamma(const double gammaRed_,const double gammaGreen_, const double gammaBlue_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(RedChannel); (void) GammaImage(image(),gammaRed_,exceptionInfo); SetPPChannelMask(GreenChannel); (void) GammaImage(image(),gammaGreen_,exceptionInfo); SetPPChannelMask(BlueChannel); (void) GammaImage(image(),gammaBlue_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::gaussianBlur(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=GaussianBlurImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::gaussianBlurChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=GaussianBlurImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } const Magick::Quantum *Magick::Image::getConstPixels(const ssize_t x_, const ssize_t y_,const size_t columns_,const size_t rows_) const { const Quantum *p; GetPPException; p=GetVirtualPixels(constImage(),x_, 
y_,columns_, rows_,exceptionInfo); ThrowImageException; return(p); } const void *Magick::Image::getConstMetacontent(void) const { const void *result; result=GetVirtualMetacontent(constImage()); if(!result) throwExceptionExplicit(MagickCore::OptionError, "Unable to retrieve meta content."); return(result); } void *Magick::Image::getMetacontent(void ) { void *result; result=GetAuthenticMetacontent(image()); if(!result) throwExceptionExplicit(MagickCore::OptionError, "Unable to retrieve meta content."); return(result); } Magick::Quantum *Magick::Image::getPixels(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_) { Quantum *result; modifyImage(); GetPPException; result=GetAuthenticPixels(image(),x_, y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::grayscale(const PixelIntensityMethod method_) { modifyImage(); GetPPException; (void) GrayscaleImage(image(),method_,exceptionInfo); ThrowImageException; } void Magick::Image::haldClut(const Image &clutImage_) { modifyImage(); GetPPException; (void) HaldClutImage(image(),clutImage_.constImage(),exceptionInfo); ThrowImageException; } void Magick::Image::houghLine(const size_t width_,const size_t height_, const size_t threshold_) { MagickCore::Image *newImage; GetPPException; newImage=HoughLineImage(constImage(),width_,height_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::ImageType Magick::Image::identifyType(void) const { ImageType image_type; GetPPException; image_type=IdentifyImageType(constImage(),exceptionInfo); ThrowImageException; return(image_type); } void Magick::Image::implode(const double factor_) { MagickCore::Image *newImage; GetPPException; newImage=ImplodeImage(constImage(),factor_,image()->interpolate, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::inverseFourierTransform(const Image &phase_) { inverseFourierTransform(phase_,true); } void 
Magick::Image::inverseFourierTransform(const Image &phase_, const bool magnitude_) { MagickCore::Image *newImage; GetPPException; newImage=InverseFourierTransformImage(constImage(),phase_.constImage(), magnitude_ == true ? MagickTrue : MagickFalse,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwahara(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::kuwaharaChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=KuwaharaImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); RestorePPChannelMask; ThrowImageException; } void Magick::Image::level(const double blackPoint_,const double whitePoint_, const double gamma_) { modifyImage(); GetPPException; (void) LevelImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo); ThrowImageException; } void Magick::Image::levelChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_,const double gamma_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) LevelImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::levelColors(const Color &blackColor_, const Color &whiteColor_,const bool invert_) { PixelInfo black, white; modifyImage(); black=static_cast<PixelInfo>(blackColor_); white=static_cast<PixelInfo>(whiteColor_); GetPPException; (void) LevelImageColors(image(),&black,&white,invert_ == true ? 
MagickTrue : MagickFalse,exceptionInfo); ThrowImageException; } void Magick::Image::levelColorsChannel(const ChannelType channel_, const Color &blackColor_,const Color &whiteColor_,const bool invert_) { PixelInfo black, white; modifyImage(); black=static_cast<PixelInfo>(blackColor_); white=static_cast<PixelInfo>(whiteColor_); GetPPException; GetAndSetPPChannelMask(channel_); (void) LevelImageColors(image(),&black,&white,invert_ == true ? MagickTrue : MagickFalse,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::levelize(const double blackPoint_,const double whitePoint_, const double gamma_) { modifyImage(); GetPPException; (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo); ThrowImageException; } void Magick::Image::levelizeChannel(const ChannelType channel_, const double blackPoint_,const double whitePoint_,const double gamma_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) LevelizeImage(image(),blackPoint_,whitePoint_,gamma_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::linearStretch(const double blackPoint_, const double whitePoint_) { modifyImage(); GetPPException; LinearStretchImage(image(),blackPoint_,whitePoint_,exceptionInfo); ThrowImageException; } void Magick::Image::liquidRescale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=LiquidRescaleImage(image(),width,height,x,y,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::localContrast(const double radius_,const double strength_) { MagickCore::Image *newImage; GetPPException; newImage=LocalContrastImage(constImage(),radius_,strength_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::magnify(void) { MagickCore::Image *newImage; GetPPException; 
newImage=MagnifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::map(const Image &mapImage_,const bool dither_) { modifyImage(); GetPPException; options()->quantizeDither(dither_); RemapImage(options()->quantizeInfo(),image(),mapImage_.constImage(), exceptionInfo); ThrowImageException; } void Magick::Image::meanShift(const size_t width_,const size_t height_, const double color_distance_) { MagickCore::Image *newImage; GetPPException; newImage=MeanShiftImage(constImage(),width_,height_,color_distance_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::medianFilter(const double radius_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(image(),MedianStatistic,(size_t) radius_, (size_t) radius_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::minify(void) { MagickCore::Image *newImage; GetPPException; newImage=MinifyImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::modulate(const double brightness_,const double saturation_, const double hue_) { char modulate[MagickPathExtent + 1]; FormatLocaleString(modulate,MagickPathExtent,"%3.6f,%3.6f,%3.6f",brightness_, saturation_,hue_); modifyImage(); GetPPException; ModulateImage(image(),modulate,exceptionInfo); ThrowImageException; } Magick::ImageMoments Magick::Image::moments(void) const { return(ImageMoments(*this)); } void Magick::Image::morphology(const MorphologyMethod method_, const std::string kernel_,const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; GetPPException; kernel=AcquireKernelInfo(kernel_.c_str(),exceptionInfo); if (kernel == (KernelInfo *) NULL) throwExceptionExplicit(MagickCore::OptionError,"Unable to parse kernel."); newImage=MorphologyImage(constImage(),method_,iterations_,kernel, exceptionInfo); replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void 
Magick::Image::morphology(const MorphologyMethod method_, const KernelInfoType kernel_,const std::string arguments_, const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(MagickCore::OptionError, "Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphology(method_,kernel,iterations_); } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const std::string kernel_, const ssize_t iterations_) { KernelInfo *kernel; MagickCore::Image *newImage; GetPPException; kernel=AcquireKernelInfo(kernel_.c_str(),exceptionInfo); if (kernel == (KernelInfo *)NULL) { throwExceptionExplicit(MagickCore::OptionError, "Unable to parse kernel."); return; } GetAndSetPPChannelMask(channel_); newImage=MorphologyImage(constImage(),method_,iterations_,kernel, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); kernel=DestroyKernelInfo(kernel); ThrowImageException; } void Magick::Image::morphologyChannel(const ChannelType channel_, const MorphologyMethod method_,const KernelInfoType kernel_, const std::string arguments_,const ssize_t iterations_) { const char *option; std::string kernel; option=CommandOptionToMnemonic(MagickKernelOptions,kernel_); if (option == (const char *)NULL) { throwExceptionExplicit(MagickCore::OptionError, "Unable to determine kernel type."); return; } kernel=std::string(option); if (!arguments_.empty()) kernel+=":"+arguments_; morphologyChannel(channel_,method_,kernel,iterations_); } void Magick::Image::motionBlur(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=MotionBlurImage(constImage(),radius_,sigma_,angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::negate(const bool grayscale_) { modifyImage(); 
GetPPException; NegateImage(image(),(MagickBooleanType) grayscale_,exceptionInfo); ThrowImageException; } void Magick::Image::negateChannel(const ChannelType channel_, const bool grayscale_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); NegateImage(image(),(MagickBooleanType) grayscale_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::normalize(void) { modifyImage(); GetPPException; NormalizeImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::oilPaint(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=OilPaintImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::opaque(const Color &opaqueColor_,const Color &penColor_, const bool invert_) { std::string opaqueColor, penColor; PixelInfo opaque, pen; if (!opaqueColor_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Opaque color argument is invalid"); if (!penColor_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Pen color argument is invalid"); modifyImage(); opaqueColor=opaqueColor_; penColor=penColor_; GetPPException; (void) QueryColorCompliance(opaqueColor.c_str(),AllCompliance,&opaque, exceptionInfo); (void) QueryColorCompliance(penColor.c_str(),AllCompliance,&pen, exceptionInfo); OpaquePaintImage(image(),&opaque,&pen,invert_ ? 
MagickTrue : MagickFalse, exceptionInfo); ThrowImageException; } void Magick::Image::orderedDither(std::string thresholdMap_) { modifyImage(); GetPPException; (void) OrderedDitherImage(image(),thresholdMap_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::orderedDitherChannel(const ChannelType channel_, std::string thresholdMap_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void)OrderedDitherImage(image(),thresholdMap_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::perceptible(const double epsilon_) { modifyImage(); GetPPException; PerceptibleImage(image(),epsilon_,exceptionInfo); ThrowImageException; } void Magick::Image::perceptibleChannel(const ChannelType channel_, const double epsilon_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); PerceptibleImage(image(),epsilon_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } Magick::ImagePerceptualHash Magick::Image::perceptualHash() const { return(ImagePerceptualHash(*this)); } void Magick::Image::ping(const std::string &imageSpec_) { MagickCore::Image *newImage; GetPPException; options()->fileName(imageSpec_); newImage=PingImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::ping(const Blob& blob_) { MagickCore::Image *newImage; GetPPException; newImage=PingBlob(imageInfo(),blob_.data(),blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::pixelColor(const ssize_t x_,const ssize_t y_, const Color &color_) { PixelInfo packet; Quantum *pixel; // Test arguments to ensure they are within the image. 
if (y_ > (ssize_t) rows() || x_ > (ssize_t) columns()) throwExceptionExplicit(MagickCore::OptionError, "Access outside of image boundary"); modifyImage(); // Set image to DirectClass classType(DirectClass ); // Get pixel view Pixels pixels(*this); // Set pixel value pixel=pixels.get(x_, y_, 1, 1 ); packet=color_; MagickCore::SetPixelViaPixelInfo(constImage(),&packet,pixel); // Tell ImageMagick that pixels have been updated pixels.sync(); } Magick::Color Magick::Image::pixelColor(const ssize_t x_, const ssize_t y_) const { const Quantum *pixel; pixel=getConstPixels(x_,y_,1,1); if (pixel) { PixelInfo packet; MagickCore::GetPixelInfoPixel(constImage(),pixel,&packet); return(Color(packet)); } return(Color()); // invalid } void Magick::Image::polaroid(const std::string &caption_,const double angle_, const PixelInterpolateMethod method_) { MagickCore::Image *newImage; GetPPException; newImage=PolaroidImage(constImage(),options()->drawInfo(),caption_.c_str(), angle_,method_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::posterize(const size_t levels_,const DitherMethod method_) { modifyImage(); GetPPException; PosterizeImage(image(),levels_,method_,exceptionInfo); ThrowImageException; } void Magick::Image::posterizeChannel(const ChannelType channel_, const size_t levels_,const DitherMethod method_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); PosterizeImage(image(),levels_,method_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::process(std::string name_,const ssize_t argc, const char **argv) { modifyImage(); GetPPException; (void) InvokeDynamicImageFilter(name_.c_str(),&image(),argc,argv, exceptionInfo); ThrowImageException; } void Magick::Image::profile(const std::string name_, const Magick::Blob &profile_) { modifyImage(); GetPPException; (void) ProfileImage(image(),name_.c_str(),(unsigned char *)profile_.data(), profile_.length(),exceptionInfo); ThrowImageException; } 
Magick::Blob Magick::Image::profile(const std::string name_) const { const StringInfo *profile; profile=GetImageProfile(constImage(),name_.c_str()); if (profile == (StringInfo *) NULL) return(Blob()); return(Blob((void*) GetStringInfoDatum(profile),GetStringInfoLength( profile))); } void Magick::Image::quantize(const bool measureError_) { modifyImage(); if (measureError_) options()->quantizeInfo()->measure_error=MagickTrue; else options()->quantizeInfo()->measure_error=MagickFalse; GetPPException; QuantizeImage(options()->quantizeInfo(),image(),exceptionInfo); ThrowImageException; } void Magick::Image::raise(const Geometry &geometry_,const bool raisedFlag_) { RectangleInfo raiseInfo=geometry_; GetPPException; modifyImage(); RaiseImage(image(),&raiseInfo,raisedFlag_ == true ? MagickTrue : MagickFalse, exceptionInfo); ThrowImageException; } void Magick::Image::randomThreshold(const double low_,const double high_) { GetPPException; (void) RandomThresholdImage(image(),low_,high_,exceptionInfo); ThrowImageException; } void Magick::Image::randomThresholdChannel(const ChannelType channel_, const double low_,const double high_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); (void) RandomThresholdImage(image(),low_,high_,exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::read(const Blob &blob_) { MagickCore::Image *newImage; GetPPException; newImage=BlobToImage(imageInfo(),static_cast<const void *>(blob_.data()), blob_.length(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::read(const Blob &blob_,const Geometry &size_) { size(size_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_) { size(size_); depth(depth_); read(blob_); } void Magick::Image::read(const Blob &blob_,const Geometry &size_, const size_t depth_,const std::string &magick_) { size(size_); depth(depth_); magick(magick_); // Set explicit image format fileName(magick_ + ':'); read(blob_); } 
void Magick::Image::read(const Blob &blob_,const Geometry &size_, const std::string &magick_) { size(size_); magick(magick_); // Set explicit image format fileName(magick_ + ':'); read(blob_); } void Magick::Image::read(const Geometry &size_,const std::string &imageSpec_) { size(size_); read(imageSpec_); } void Magick::Image::read(const size_t width_,const size_t height_, const std::string &map_,const StorageType type_,const void *pixels_) { MagickCore::Image *newImage; GetPPException; newImage=ConstituteImage(width_,height_,map_.c_str(),type_, pixels_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::read(const std::string &imageSpec_) { MagickCore::Image *newImage; GetPPException; options()->fileName(imageSpec_); newImage=ReadImage(imageInfo(),exceptionInfo); read(newImage,exceptionInfo); } void Magick::Image::readMask(const Magick::Image &mask_) { mask(mask_,ReadPixelMask); } Magick::Image Magick::Image::readMask(void) const { return(mask(ReadPixelMask)); } void Magick::Image::readPixels(const Magick::QuantumType quantum_, const unsigned char *source_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; ImportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,source_,exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::reduceNoise(void) { reduceNoise(3); } void Magick::Image::reduceNoise(const size_t order_) { MagickCore::Image *newImage; GetPPException; newImage=StatisticImage(constImage(),NonpeakStatistic,order_, order_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::repage() { modifyImage(); options()->page(Geometry()); image()->page.width = 0; image()->page.height = 0; image()->page.x = 0; image()->page.y = 0; } void Magick::Image::resample(const Point &density_) { MagickCore::Image *newImage; GetPPException; 
newImage=ResampleImage(constImage(),density_.x(),density_.y(), image()->filter,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::resize(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; // Calculate new size. This code should be supported using binary arguments // in the ImageMagick library. ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const Geometry &roll_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),roll_.xOff(),roll_.yOff(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::roll(const size_t columns_,const size_t rows_) { MagickCore::Image *newImage; GetPPException; newImage=RollImage(constImage(),static_cast<ssize_t>(columns_), static_cast<ssize_t>(rows_),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotate(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=RotateImage(constImage(),degrees_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlur(const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::rotationalBlurChannel(const ChannelType channel_, const double angle_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=RotationalBlurImage(constImage(),angle_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::sample(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; 
ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=SampleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::scale(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ScaleImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::segment(const double clusterThreshold_, const double smoothingThreshold_) { modifyImage(); GetPPException; SegmentImage(image(),options()->quantizeColorSpace(), (MagickBooleanType) options()->verbose(),clusterThreshold_, smoothingThreshold_,exceptionInfo); SyncImage(image(),exceptionInfo); ThrowImageException; } void Magick::Image::selectiveBlur(const double radius_,const double sigma_, const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::selectiveBlurChannel(const ChannelType channel_, const double radius_,const double sigma_,const double threshold_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SelectiveBlurImage(constImage(),radius_,sigma_,threshold_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::separate(const ChannelType channel_) const { MagickCore::Image *image; GetPPException; image=SeparateImage(constImage(),channel_,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::sepiaTone(const double threshold_) { MagickCore::Image *newImage; GetPPException; 
newImage=SepiaToneImage(constImage(),threshold_,exceptionInfo); replaceImage(newImage); ThrowImageException; } bool Magick::Image::setColorMetric(const Image &reference_) { bool status; Image ref=reference_; GetPPException; modifyImage(); status=static_cast<bool>(SetImageColorMetric(image(),ref.constImage(), exceptionInfo)); ThrowImageException; return(status); } Magick::Quantum *Magick::Image::setPixels(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_) { Quantum *result; modifyImage(); GetPPException; result=QueueAuthenticPixels(image(),x_,y_,columns_,rows_,exceptionInfo); ThrowImageException; return(result); } void Magick::Image::shade(const double azimuth_,const double elevation_, const bool colorShading_) { MagickCore::Image *newImage; GetPPException; newImage=ShadeImage(constImage(),colorShading_ == true ? MagickTrue : MagickFalse,azimuth_,elevation_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::shadow(const double percent_opacity_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=ShadowImage(constImage(),percent_opacity_, sigma_,x_, y_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpen(const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sharpenChannel(const ChannelType channel_, const double radius_,const double sigma_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SharpenImage(constImage(),radius_,sigma_,exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::shave(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo shaveInfo=geometry_; GetPPException; newImage=ShaveImage(constImage(),&shaveInfo,exceptionInfo); 
replaceImage(newImage); ThrowImageException; } void Magick::Image::shear(const double xShearAngle_,const double yShearAngle_) { MagickCore::Image *newImage; GetPPException; newImage=ShearImage(constImage(),xShearAngle_,yShearAngle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::sigmoidalContrast(const bool sharpen_, const double contrast,const double midpoint) { modifyImage(); GetPPException; (void) SigmoidalContrastImage(image(),(MagickBooleanType) sharpen_,contrast, midpoint,exceptionInfo); ThrowImageException; } std::string Magick::Image::signature(const bool force_) const { return(_imgRef->signature(force_)); } void Magick::Image::sketch(const double radius_,const double sigma_, const double angle_) { MagickCore::Image *newImage; GetPPException; newImage=SketchImage(constImage(),radius_,sigma_,angle_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::solarize(const double factor_) { modifyImage(); GetPPException; SolarizeImage(image(),factor_,exceptionInfo); ThrowImageException; } void Magick::Image::sparseColor(const ChannelType channel_, const SparseColorMethod method_,const size_t numberArguments_, const double *arguments_) { MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=SparseColorImage(constImage(),method_,numberArguments_,arguments_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_) { MagickCore::Image *newImage; RectangleInfo spliceInfo=geometry_; GetPPException; newImage=SpliceImage(constImage(),&spliceInfo,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_) { backgroundColor(backgroundColor_); splice(geometry_); } void Magick::Image::splice(const Geometry &geometry_, const Color &backgroundColor_,const GravityType gravity_) { backgroundColor(backgroundColor_); 
image()->gravity=gravity_; splice(geometry_); } void Magick::Image::spread(const double amount_) { MagickCore::Image *newImage; GetPPException; newImage=SpreadImage(constImage(),image()->interpolate,amount_,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::ImageStatistics Magick::Image::statistics() const { return(ImageStatistics(*this)); } void Magick::Image::stegano(const Image &watermark_) { MagickCore::Image *newImage; GetPPException; newImage=SteganoImage(constImage(),watermark_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::stereo(const Image &rightImage_) { MagickCore::Image *newImage; GetPPException; newImage=StereoImage(constImage(),rightImage_.constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::strip(void) { modifyImage(); GetPPException; StripImage(image(),exceptionInfo); ThrowImageException; } Magick::Image Magick::Image::subImageSearch(const Image &reference_, const MetricType metric_,Geometry *offset_,double *similarityMetric_, const double similarityThreshold) { MagickCore::Image *newImage; RectangleInfo offset; GetPPException; newImage=SimilarityImage(image(),reference_.constImage(),metric_, similarityThreshold,&offset,similarityMetric_,exceptionInfo); ThrowImageException; if (offset_ != (Geometry *) NULL) *offset_=offset; if (newImage == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(newImage)); } void Magick::Image::swirl(const double degrees_) { MagickCore::Image *newImage; GetPPException; newImage=SwirlImage(constImage(),degrees_,image()->interpolate, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::syncPixels(void) { GetPPException; (void) SyncAuthenticPixels(image(),exceptionInfo); ThrowImageException; } void Magick::Image::texture(const Image &texture_) { modifyImage(); GetPPException; TextureImage(image(),texture_.constImage(),exceptionInfo); ThrowImageException; } 
void Magick::Image::threshold(const double threshold_) { modifyImage(); GetPPException; BilevelImage(image(),threshold_,exceptionInfo); ThrowImageException; } void Magick::Image::thumbnail(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ThumbnailImage(constImage(),width,height,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::tint(const std::string opacity_) { MagickCore::Image *newImage; PixelInfo color; GetPPException; color=static_cast<PixelInfo>(constOptions()->fillColor()); newImage=TintImage(constImage(),opacity_.c_str(),&color,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transformOrigin(const double x_,const double y_) { modifyImage(); options()->transformOrigin(x_,y_); } void Magick::Image::transformReset(void) { modifyImage(); options()->transformReset(); } void Magick::Image::transformScale(const double sx_,const double sy_) { modifyImage(); options()->transformScale(sx_,sy_); } void Magick::Image::transparent(const Color &color_,const bool inverse_) { PixelInfo target; std::string color; if (!color_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Color argument is invalid"); color=color_; GetPPException; (void) QueryColorCompliance(color.c_str(),AllCompliance,&target, exceptionInfo); modifyImage(); TransparentPaintImage(image(),&target,TransparentAlpha, inverse_ == true ? 
MagickTrue : MagickFalse,exceptionInfo); ThrowImageException; } void Magick::Image::transparentChroma(const Color &colorLow_, const Color &colorHigh_) { std::string colorHigh, colorLow; PixelInfo targetHigh, targetLow; if (!colorLow_.isValid() || !colorHigh_.isValid()) throwExceptionExplicit(MagickCore::OptionError, "Color argument is invalid"); colorLow=colorLow_; colorHigh=colorHigh_; GetPPException; (void) QueryColorCompliance(colorLow.c_str(),AllCompliance,&targetLow, exceptionInfo); (void) QueryColorCompliance(colorHigh.c_str(),AllCompliance,&targetHigh, exceptionInfo); modifyImage(); TransparentPaintImageChroma(image(),&targetLow,&targetHigh,TransparentAlpha, MagickFalse,exceptionInfo); ThrowImageException; } void Magick::Image::transpose(void) { MagickCore::Image *newImage; GetPPException; newImage=TransposeImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::transverse(void) { MagickCore::Image *newImage; GetPPException; newImage=TransverseImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::trim(void) { MagickCore::Image *newImage; GetPPException; newImage=TrimImage(constImage(),exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image Magick::Image::uniqueColors(void) const { MagickCore::Image *image; GetPPException; image=UniqueImageColors(constImage(),exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); } void Magick::Image::unsharpmask(const double radius_,const double sigma_, const double amount_,const double threshold_) { MagickCore::Image *newImage; GetPPException; newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::unsharpmaskChannel(const ChannelType channel_, const double radius_,const double sigma_,const double amount_, const double threshold_) 
{ MagickCore::Image *newImage; GetPPException; GetAndSetPPChannelMask(channel_); newImage=UnsharpMaskImage(constImage(),radius_,sigma_,amount_,threshold_, exceptionInfo); RestorePPChannelMask; replaceImage(newImage); ThrowImageException; } void Magick::Image::vignette(const double radius_,const double sigma_, const ssize_t x_,const ssize_t y_) { MagickCore::Image *newImage; GetPPException; newImage=VignetteImage(constImage(),radius_,sigma_,x_,y_,exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::wave(const double amplitude_,const double wavelength_) { MagickCore::Image *newImage; GetPPException; newImage=WaveImage(constImage(),amplitude_,wavelength_,image()->interpolate, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::waveletDenoise(const double threshold_, const double softness_) { MagickCore::Image *newImage; GetPPException; newImage=WaveletDenoiseImage(constImage(),threshold_,softness_, exceptionInfo); replaceImage(newImage); ThrowImageException; } void Magick::Image::whiteThreshold(const std::string &threshold_) { modifyImage(); GetPPException; WhiteThresholdImage(image(),threshold_.c_str(),exceptionInfo); ThrowImageException; } void Magick::Image::whiteThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); WhiteThresholdImage(image(),threshold_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; } void Magick::Image::write(Blob *blob_) { size_t length=0; void *data; modifyImage(); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(Blob *blob_,const std::string &magick_) { size_t length=0; void *data; modifyImage(); magick(magick_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) 
blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(Blob *blob_,const std::string &magick_, const size_t depth_) { size_t length=0; void *data; modifyImage(); magick(magick_); depth(depth_); GetPPException; data=ImagesToBlob(constImageInfo(),image(),&length,exceptionInfo); if (length > 0) blob_->updateNoCopy(data,length,Blob::MallocAllocator); ThrowImageException; } void Magick::Image::write(const ssize_t x_,const ssize_t y_, const size_t columns_,const size_t rows_,const std::string &map_, const StorageType type_,void *pixels_) { GetPPException; ExportImagePixels(image(),x_,y_,columns_,rows_,map_.c_str(),type_,pixels_, exceptionInfo); ThrowImageException; } void Magick::Image::write(const std::string &imageSpec_) { modifyImage(); fileName(imageSpec_); GetPPException; WriteImage(constImageInfo(),image(),exceptionInfo); ThrowImageException; } void Magick::Image::writeMask(const Magick::Image &mask_) { mask(mask_,WritePixelMask); } Magick::Image Magick::Image::writeMask(void) const { return(mask(WritePixelMask)); } void Magick::Image::writePixels(const Magick::QuantumType quantum_, unsigned char *destination_) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(imageInfo(),image()); GetPPException; ExportQuantumPixels(image(),(MagickCore::CacheView *) NULL,quantum_info, quantum_,destination_, exceptionInfo); quantum_info=DestroyQuantumInfo(quantum_info); ThrowImageException; } void Magick::Image::zoom(const Geometry &geometry_) { MagickCore::Image *newImage; size_t height=rows(), width=columns(); ssize_t x=0, y=0; ParseMetaGeometry(static_cast<std::string>(geometry_).c_str(),&x,&y,&width, &height); GetPPException; newImage=ResizeImage(constImage(),width,height,image()->filter,exceptionInfo); replaceImage(newImage); ThrowImageException; } Magick::Image::Image(MagickCore::Image *image_) : _imgRef(new ImageRef(image_)) { } MagickCore::Image *&Magick::Image::image(void) { return(_imgRef->image()); } const 
MagickCore::Image *Magick::Image::constImage(void) const { return(_imgRef->image()); } MagickCore::ImageInfo *Magick::Image::imageInfo(void) { return(_imgRef->options()->imageInfo()); } const MagickCore::ImageInfo *Magick::Image::constImageInfo(void) const { return(_imgRef->options()->imageInfo()); } Magick::Options *Magick::Image::options(void) { return(_imgRef->options()); } const Magick::Options *Magick::Image::constOptions(void) const { return(_imgRef->options()); } MagickCore::QuantizeInfo *Magick::Image::quantizeInfo(void) { return(_imgRef->options()->quantizeInfo()); } const MagickCore::QuantizeInfo *Magick::Image::constQuantizeInfo(void) const { return(_imgRef->options()->quantizeInfo()); } void Magick::Image::modifyImage(void) { if (!_imgRef->isShared()) return; GetPPException; replaceImage(CloneImage(image(),0,0,MagickTrue,exceptionInfo)); ThrowImageException; } MagickCore::Image *Magick::Image::replaceImage(MagickCore::Image *replacement_) { MagickCore::Image *image; if (replacement_) image=replacement_; else { GetPPException; image=AcquireImage(constImageInfo(),exceptionInfo); ThrowImageException; } _imgRef=ImageRef::replaceImage(_imgRef,image); return(image); } void Magick::Image::read(MagickCore::Image *image, MagickCore::ExceptionInfo *exceptionInfo) { // Ensure that multiple image frames were not read. 
if (image != (MagickCore::Image *) NULL && image->next != (MagickCore::Image *) NULL) { MagickCore::Image *next; // Destroy any extra image frames next=image->next; image->next=(MagickCore::Image *) NULL; next->previous=(MagickCore::Image *) NULL; DestroyImageList(next); } replaceImage(image); if (exceptionInfo->severity == MagickCore::UndefinedException && image == (MagickCore::Image *) NULL) { (void) MagickCore::DestroyExceptionInfo(exceptionInfo); if (!quiet()) throwExceptionExplicit(MagickCore::ImageWarning, "No image was loaded."); } ThrowImageException; } void Magick::Image::floodFill(const ssize_t x_,const ssize_t y_, const Magick::Image *fillPattern_,const Magick::Color &fill_, const MagickCore::PixelInfo *target_,const bool invert_) { Magick::Color fillColor; MagickCore::Image *fillPattern; // Set drawing fill pattern or fill color fillColor=options()->fillColor(); fillPattern=(MagickCore::Image *)NULL; if (options()->fillPattern() != (MagickCore::Image *)NULL) { GetPPException; fillPattern=CloneImage(options()->fillPattern(),0,0,MagickTrue, exceptionInfo); ThrowImageException; } if (fillPattern_ == (Magick::Image *)NULL) { options()->fillPattern((MagickCore::Image *)NULL); options()->fillColor(fill_); } else options()->fillPattern(fillPattern_->constImage()); GetPPException; (void) FloodfillPaintImage(image(),options()->drawInfo(), target_,static_cast<ssize_t>(x_),static_cast<ssize_t>(y_), (MagickBooleanType) invert_,exceptionInfo); options()->fillColor(fillColor); options()->fillPattern(fillPattern); ThrowImageException; } void Magick::Image::mask(const Magick::Image &mask_,const PixelMask type) { modifyImage(); GetPPException; if (mask_.isValid()) SetImageMask(image(),type,mask_.constImage(),exceptionInfo); else SetImageMask(image(),type,(MagickCore::Image *) NULL, exceptionInfo); ThrowImageException; } Magick::Image Magick::Image::mask(const PixelMask type) const { MagickCore::Image *image; GetPPException; image = 
GetImageMask(constImage(),type,exceptionInfo); ThrowImageException; if (image == (MagickCore::Image *) NULL) return(Magick::Image()); else return(Magick::Image(image)); }
./CrossVul/dataset_final_sorted/CWE-416/cpp/bad_2968_0
crossvul-cpp_data_bad_4231_2
/* ** $Id: ltm.c $ ** Tag methods ** See Copyright Notice in lua.h */ #define ltm_c #define LUA_CORE #include "lprefix.h" #include <string.h> #include "lua.h" #include "ldebug.h" #include "ldo.h" #include "lgc.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lvm.h" static const char udatatypename[] = "userdata"; LUAI_DDEF const char *const luaT_typenames_[LUA_TOTALTYPES] = { "no value", "nil", "boolean", udatatypename, "number", "string", "table", "function", udatatypename, "thread", "upvalue", "proto" /* these last cases are used for tests only */ }; void luaT_init (lua_State *L) { static const char *const luaT_eventname[] = { /* ORDER TM */ "__index", "__newindex", "__gc", "__mode", "__len", "__eq", "__add", "__sub", "__mul", "__mod", "__pow", "__div", "__idiv", "__band", "__bor", "__bxor", "__shl", "__shr", "__unm", "__bnot", "__lt", "__le", "__concat", "__call", "__close" }; int i; for (i=0; i<TM_N; i++) { G(L)->tmname[i] = luaS_new(L, luaT_eventname[i]); luaC_fix(L, obj2gco(G(L)->tmname[i])); /* never collect these names */ } } /* ** function to be used with macro "fasttm": optimized for absence of ** tag methods */ const TValue *luaT_gettm (Table *events, TMS event, TString *ename) { const TValue *tm = luaH_getshortstr(events, ename); lua_assert(event <= TM_EQ); if (notm(tm)) { /* no tag method? */ events->flags |= cast_byte(1u<<event); /* cache this fact */ return NULL; } else return tm; } const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, TMS event) { Table *mt; switch (ttype(o)) { case LUA_TTABLE: mt = hvalue(o)->metatable; break; case LUA_TUSERDATA: mt = uvalue(o)->metatable; break; default: mt = G(L)->mt[ttype(o)]; } return (mt ? luaH_getshortstr(mt, G(L)->tmname[event]) : &G(L)->nilvalue); } /* ** Return the name of the type of an object. For tables and userdata ** with metatable, use their '__name' metafield, if present. 
*/ const char *luaT_objtypename (lua_State *L, const TValue *o) { Table *mt; if ((ttistable(o) && (mt = hvalue(o)->metatable) != NULL) || (ttisfulluserdata(o) && (mt = uvalue(o)->metatable) != NULL)) { const TValue *name = luaH_getshortstr(mt, luaS_new(L, "__name")); if (ttisstring(name)) /* is '__name' a string? */ return getstr(tsvalue(name)); /* use it as type name */ } return ttypename(ttype(o)); /* else use standard type name */ } void luaT_callTM (lua_State *L, const TValue *f, const TValue *p1, const TValue *p2, const TValue *p3) { StkId func = L->top; setobj2s(L, func, f); /* push function (assume EXTRA_STACK) */ setobj2s(L, func + 1, p1); /* 1st argument */ setobj2s(L, func + 2, p2); /* 2nd argument */ setobj2s(L, func + 3, p3); /* 3rd argument */ L->top = func + 4; /* metamethod may yield only when called from Lua code */ if (isLuacode(L->ci)) luaD_call(L, func, 0); else luaD_callnoyield(L, func, 0); } void luaT_callTMres (lua_State *L, const TValue *f, const TValue *p1, const TValue *p2, StkId res) { ptrdiff_t result = savestack(L, res); StkId func = L->top; setobj2s(L, func, f); /* push function (assume EXTRA_STACK) */ setobj2s(L, func + 1, p1); /* 1st argument */ setobj2s(L, func + 2, p2); /* 2nd argument */ L->top += 3; /* metamethod may yield only when called from Lua code */ if (isLuacode(L->ci)) luaD_call(L, func, 1); else luaD_callnoyield(L, func, 1); res = restorestack(L, result); setobjs2s(L, res, --L->top); /* move result to its place */ } static int callbinTM (lua_State *L, const TValue *p1, const TValue *p2, StkId res, TMS event) { const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */ if (notm(tm)) tm = luaT_gettmbyobj(L, p2, event); /* try second operand */ if (notm(tm)) return 0; luaT_callTMres(L, tm, p1, p2, res); return 1; } void luaT_trybinTM (lua_State *L, const TValue *p1, const TValue *p2, StkId res, TMS event) { if (!callbinTM(L, p1, p2, res, event)) { switch (event) { case TM_BAND: case TM_BOR: case TM_BXOR: case 
TM_SHL: case TM_SHR: case TM_BNOT: { if (ttisnumber(p1) && ttisnumber(p2)) luaG_tointerror(L, p1, p2); else luaG_opinterror(L, p1, p2, "perform bitwise operation on"); } /* calls never return, but to avoid warnings: *//* FALLTHROUGH */ default: luaG_opinterror(L, p1, p2, "perform arithmetic on"); } } } void luaT_tryconcatTM (lua_State *L) { StkId top = L->top; if (!callbinTM(L, s2v(top - 2), s2v(top - 1), top - 2, TM_CONCAT)) luaG_concaterror(L, s2v(top - 2), s2v(top - 1)); } void luaT_trybinassocTM (lua_State *L, const TValue *p1, const TValue *p2, int flip, StkId res, TMS event) { if (flip) luaT_trybinTM(L, p2, p1, res, event); else luaT_trybinTM(L, p1, p2, res, event); } void luaT_trybiniTM (lua_State *L, const TValue *p1, lua_Integer i2, int flip, StkId res, TMS event) { TValue aux; setivalue(&aux, i2); luaT_trybinassocTM(L, p1, &aux, flip, res, event); } /* ** Calls an order tag method. ** For lessequal, LUA_COMPAT_LT_LE keeps compatibility with old ** behavior: if there is no '__le', try '__lt', based on l <= r iff ** !(r < l) (assuming a total order). If the metamethod yields during ** this substitution, the continuation has to know about it (to negate ** the result of r<l); bit CIST_LEQ in the call status keeps that ** information. 
*/ int luaT_callorderTM (lua_State *L, const TValue *p1, const TValue *p2, TMS event) { if (callbinTM(L, p1, p2, L->top, event)) /* try original event */ return !l_isfalse(s2v(L->top)); #if defined(LUA_COMPAT_LT_LE) else if (event == TM_LE) { /* try '!(p2 < p1)' for '(p1 <= p2)' */ L->ci->callstatus |= CIST_LEQ; /* mark it is doing 'lt' for 'le' */ if (callbinTM(L, p2, p1, L->top, TM_LT)) { L->ci->callstatus ^= CIST_LEQ; /* clear mark */ return l_isfalse(s2v(L->top)); } /* else error will remove this 'ci'; no need to clear mark */ } #endif luaG_ordererror(L, p1, p2); /* no metamethod found */ return 0; /* to avoid warnings */ } int luaT_callorderiTM (lua_State *L, const TValue *p1, int v2, int flip, int isfloat, TMS event) { TValue aux; const TValue *p2; if (isfloat) { setfltvalue(&aux, cast_num(v2)); } else setivalue(&aux, v2); if (flip) { /* arguments were exchanged? */ p2 = p1; p1 = &aux; /* correct them */ } else p2 = &aux; return luaT_callorderTM(L, p1, p2, event); } void luaT_adjustvarargs (lua_State *L, int nfixparams, CallInfo *ci, const Proto *p) { int i; int actual = cast_int(L->top - ci->func) - 1; /* number of arguments */ int nextra = actual - nfixparams; /* number of extra arguments */ ci->u.l.nextraargs = nextra; checkstackGC(L, p->maxstacksize + 1); /* copy function to the top of the stack */ setobjs2s(L, L->top++, ci->func); /* move fixed parameters to the top of the stack */ for (i = 1; i <= nfixparams; i++) { setobjs2s(L, L->top++, ci->func + i); setnilvalue(s2v(ci->func + i)); /* erase original parameter (for GC) */ } ci->func += actual + 1; ci->top += actual + 1; lua_assert(L->top <= ci->top && ci->top <= L->stack_last); } void luaT_getvarargs (lua_State *L, CallInfo *ci, StkId where, int wanted) { int i; int nextra = ci->u.l.nextraargs; if (wanted < 0) { wanted = nextra; /* get all extra arguments available */ checkstackp(L, nextra, where); /* ensure stack space */ L->top = where + nextra; /* next instruction will need top */ } for (i = 0; i < 
wanted && i < nextra; i++) setobjs2s(L, where + i, ci->func - nextra + i); for (; i < wanted; i++) /* complete required results with nil */ setnilvalue(s2v(where + i)); }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_4231_2
crossvul-cpp_data_bad_820_0
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/init.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/sched/signal.h> #include <linux/uio.h> #include <linux/miscdevice.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/pipe_fs_i.h> #include <linux/swap.h> #include <linux/splice.h> #include <linux/sched.h> MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); /* Ordinary requests have even IDs, while interrupts IDs are odd */ #define FUSE_INT_REQ_BIT (1ULL << 0) #define FUSE_REQ_ID_STEP (1ULL << 1) static struct kmem_cache *fuse_req_cachep; static struct fuse_dev *fuse_get_dev(struct file *file) { /* * Lockless access is OK, because file->private data is set * once during mount and is valid until the file is released. */ return READ_ONCE(file->private_data); } static void fuse_request_init(struct fuse_req *req, struct page **pages, struct fuse_page_desc *page_descs, unsigned npages) { INIT_LIST_HEAD(&req->list); INIT_LIST_HEAD(&req->intr_entry); init_waitqueue_head(&req->waitq); refcount_set(&req->count, 1); req->pages = pages; req->page_descs = page_descs; req->max_pages = npages; __set_bit(FR_PENDING, &req->flags); } static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags, struct fuse_page_desc **desc) { struct page **pages; pages = kzalloc(npages * (sizeof(struct page *) + sizeof(struct fuse_page_desc)), flags); *desc = (void *) pages + npages * sizeof(struct page *); return pages; } static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) { struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags); if (req) { struct page **pages = NULL; struct fuse_page_desc *page_descs = NULL; WARN_ON(npages > FUSE_MAX_MAX_PAGES); if (npages > FUSE_REQ_INLINE_PAGES) { pages = 
fuse_req_pages_alloc(npages, flags, &page_descs); if (!pages) { kmem_cache_free(fuse_req_cachep, req); return NULL; } } else if (npages) { pages = req->inline_pages; page_descs = req->inline_page_descs; } fuse_request_init(req, pages, page_descs, npages); } return req; } struct fuse_req *fuse_request_alloc(unsigned npages) { return __fuse_request_alloc(npages, GFP_KERNEL); } EXPORT_SYMBOL_GPL(fuse_request_alloc); struct fuse_req *fuse_request_alloc_nofs(unsigned npages) { return __fuse_request_alloc(npages, GFP_NOFS); } static void fuse_req_pages_free(struct fuse_req *req) { if (req->pages != req->inline_pages) kfree(req->pages); } bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req, gfp_t flags) { struct page **pages; struct fuse_page_desc *page_descs; unsigned int npages = min_t(unsigned int, max_t(unsigned int, req->max_pages * 2, FUSE_DEFAULT_MAX_PAGES_PER_REQ), fc->max_pages); WARN_ON(npages <= req->max_pages); pages = fuse_req_pages_alloc(npages, flags, &page_descs); if (!pages) return false; memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages); memcpy(page_descs, req->page_descs, sizeof(struct fuse_page_desc) * req->max_pages); fuse_req_pages_free(req); req->pages = pages; req->page_descs = page_descs; req->max_pages = npages; return true; } void fuse_request_free(struct fuse_req *req) { fuse_req_pages_free(req); kmem_cache_free(fuse_req_cachep, req); } void __fuse_get_request(struct fuse_req *req) { refcount_inc(&req->count); } /* Must be called with > 1 refcount */ static void __fuse_put_request(struct fuse_req *req) { refcount_dec(&req->count); } void fuse_set_initialized(struct fuse_conn *fc) { /* Make sure stores before this are seen on another CPU */ smp_wmb(); fc->initialized = 1; } static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { return !fc->initialized || (for_background && fc->blocked); } static void fuse_drop_waiting(struct fuse_conn *fc) { /* * lockess check of fc->connected is okay, 
because atomic_dec_and_test() * provides a memory barrier mached with the one in fuse_wait_aborted() * to ensure no wake-up is missed. */ if (atomic_dec_and_test(&fc->num_waiting) && !READ_ONCE(fc->connected)) { /* wake up aborters */ wake_up_all(&fc->blocked_waitq); } } static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, bool for_background) { struct fuse_req *req; int err; atomic_inc(&fc->num_waiting); if (fuse_block_alloc(fc, for_background)) { err = -EINTR; if (wait_event_killable_exclusive(fc->blocked_waitq, !fuse_block_alloc(fc, for_background))) goto out; } /* Matches smp_wmb() in fuse_set_initialized() */ smp_rmb(); err = -ENOTCONN; if (!fc->connected) goto out; err = -ECONNREFUSED; if (fc->conn_error) goto out; req = fuse_request_alloc(npages); err = -ENOMEM; if (!req) { if (for_background) wake_up(&fc->blocked_waitq); goto out; } req->in.h.uid = from_kuid(fc->user_ns, current_fsuid()); req->in.h.gid = from_kgid(fc->user_ns, current_fsgid()); req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); __set_bit(FR_WAITING, &req->flags); if (for_background) __set_bit(FR_BACKGROUND, &req->flags); if (unlikely(req->in.h.uid == ((uid_t)-1) || req->in.h.gid == ((gid_t)-1))) { fuse_put_request(fc, req); return ERR_PTR(-EOVERFLOW); } return req; out: fuse_drop_waiting(fc); return ERR_PTR(err); } struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages) { return __fuse_get_req(fc, npages, false); } EXPORT_SYMBOL_GPL(fuse_get_req); struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc, unsigned npages) { return __fuse_get_req(fc, npages, true); } EXPORT_SYMBOL_GPL(fuse_get_req_for_background); /* * Return request in fuse_file->reserved_req. However that may * currently be in use. If that is the case, wait for it to become * available. 
*/ static struct fuse_req *get_reserved_req(struct fuse_conn *fc, struct file *file) { struct fuse_req *req = NULL; struct fuse_inode *fi = get_fuse_inode(file_inode(file)); struct fuse_file *ff = file->private_data; do { wait_event(fc->reserved_req_waitq, ff->reserved_req); spin_lock(&fi->lock); if (ff->reserved_req) { req = ff->reserved_req; ff->reserved_req = NULL; req->stolen_file = get_file(file); } spin_unlock(&fi->lock); } while (!req); return req; } /* * Put stolen request back into fuse_file->reserved_req */ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) { struct file *file = req->stolen_file; struct fuse_inode *fi = get_fuse_inode(file_inode(file)); struct fuse_file *ff = file->private_data; WARN_ON(req->max_pages); spin_lock(&fi->lock); memset(req, 0, sizeof(*req)); fuse_request_init(req, NULL, NULL, 0); BUG_ON(ff->reserved_req); ff->reserved_req = req; wake_up_all(&fc->reserved_req_waitq); spin_unlock(&fi->lock); fput(file); } /* * Gets a requests for a file operation, always succeeds * * This is used for sending the FLUSH request, which must get to * userspace, due to POSIX locks which may need to be unlocked. * * If allocation fails due to OOM, use the reserved request in * fuse_file. * * This is very unlikely to deadlock accidentally, since the * filesystem should not have it's own file open. If deadlock is * intentional, it can still be broken by "aborting" the filesystem. 
*/ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, struct file *file) { struct fuse_req *req; atomic_inc(&fc->num_waiting); wait_event(fc->blocked_waitq, fc->initialized); /* Matches smp_wmb() in fuse_set_initialized() */ smp_rmb(); req = fuse_request_alloc(0); if (!req) req = get_reserved_req(fc, file); req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); __set_bit(FR_WAITING, &req->flags); __clear_bit(FR_BACKGROUND, &req->flags); return req; } void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) { if (refcount_dec_and_test(&req->count)) { if (test_bit(FR_BACKGROUND, &req->flags)) { /* * We get here in the unlikely case that a background * request was allocated but not sent */ spin_lock(&fc->bg_lock); if (!fc->blocked) wake_up(&fc->blocked_waitq); spin_unlock(&fc->bg_lock); } if (test_bit(FR_WAITING, &req->flags)) { __clear_bit(FR_WAITING, &req->flags); fuse_drop_waiting(fc); } if (req->stolen_file) put_reserved_req(fc, req); else fuse_request_free(req); } } EXPORT_SYMBOL_GPL(fuse_put_request); static unsigned len_args(unsigned numargs, struct fuse_arg *args) { unsigned nbytes = 0; unsigned i; for (i = 0; i < numargs; i++) nbytes += args[i].size; return nbytes; } static u64 fuse_get_unique(struct fuse_iqueue *fiq) { fiq->reqctr += FUSE_REQ_ID_STEP; return fiq->reqctr; } static unsigned int fuse_req_hash(u64 unique) { return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); } static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) { req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); list_add_tail(&req->list, &fiq->pending); wake_up_locked(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, u64 nodeid, u64 nlookup) { struct fuse_iqueue 
*fiq = &fc->iq; forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; spin_lock(&fiq->waitq.lock); if (fiq->connected) { fiq->forget_list_tail->next = forget; fiq->forget_list_tail = forget; wake_up_locked(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } else { kfree(forget); } spin_unlock(&fiq->waitq.lock); } static void flush_bg_queue(struct fuse_conn *fc) { struct fuse_iqueue *fiq = &fc->iq; while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; req = list_first_entry(&fc->bg_queue, struct fuse_req, list); list_del(&req->list); fc->active_background++; spin_lock(&fiq->waitq.lock); req->in.h.unique = fuse_get_unique(fiq); queue_request(fiq, req); spin_unlock(&fiq->waitq.lock); } } /* * This function is called when a request is finished. Either a reply * has arrived or it was aborted (and not yet sent) or some error * occurred during communication with userspace, or the device file * was closed. The requester thread is woken up (if still waiting), * the 'end' callback is called if given, else the reference to the * request is released */ static void request_end(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_iqueue *fiq = &fc->iq; if (test_and_set_bit(FR_FINISHED, &req->flags)) goto put_request; /* * test_and_set_bit() implies smp_mb() between bit * changing and below intr_entry check. Pairs with * smp_mb() from queue_interrupt(). */ if (!list_empty(&req->intr_entry)) { spin_lock(&fiq->waitq.lock); list_del_init(&req->intr_entry); spin_unlock(&fiq->waitq.lock); } WARN_ON(test_bit(FR_PENDING, &req->flags)); WARN_ON(test_bit(FR_SENT, &req->flags)); if (test_bit(FR_BACKGROUND, &req->flags)) { spin_lock(&fc->bg_lock); clear_bit(FR_BACKGROUND, &req->flags); if (fc->num_background == fc->max_background) { fc->blocked = 0; wake_up(&fc->blocked_waitq); } else if (!fc->blocked) { /* * Wake up next waiter, if any. 
It's okay to use * waitqueue_active(), as we've already synced up * fc->blocked with waiters with the wake_up() call * above. */ if (waitqueue_active(&fc->blocked_waitq)) wake_up(&fc->blocked_waitq); } if (fc->num_background == fc->congestion_threshold && fc->sb) { clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); } fc->num_background--; fc->active_background--; flush_bg_queue(fc); spin_unlock(&fc->bg_lock); } else { /* Wake up waiter sleeping in request_wait_answer() */ wake_up(&req->waitq); } if (req->end) req->end(fc, req); put_request: fuse_put_request(fc, req); } static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { spin_lock(&fiq->waitq.lock); /* Check for we've sent request to interrupt this req */ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) { spin_unlock(&fiq->waitq.lock); return -EINVAL; } if (list_empty(&req->intr_entry)) { list_add_tail(&req->intr_entry, &fiq->interrupts); /* * Pairs with smp_mb() implied by test_and_set_bit() * from request_end(). 
*/ smp_mb(); if (test_bit(FR_FINISHED, &req->flags)) { list_del_init(&req->intr_entry); spin_unlock(&fiq->waitq.lock); return 0; } wake_up_locked(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } spin_unlock(&fiq->waitq.lock); return 0; } static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_iqueue *fiq = &fc->iq; int err; if (!fc->no_interrupt) { /* Any signal may interrupt this */ err = wait_event_interruptible(req->waitq, test_bit(FR_FINISHED, &req->flags)); if (!err) return; set_bit(FR_INTERRUPTED, &req->flags); /* matches barrier in fuse_dev_do_read() */ smp_mb__after_atomic(); if (test_bit(FR_SENT, &req->flags)) queue_interrupt(fiq, req); } if (!test_bit(FR_FORCE, &req->flags)) { /* Only fatal signals may interrupt this */ err = wait_event_killable(req->waitq, test_bit(FR_FINISHED, &req->flags)); if (!err) return; spin_lock(&fiq->waitq.lock); /* Request is not yet in userspace, bail out */ if (test_bit(FR_PENDING, &req->flags)) { list_del(&req->list); spin_unlock(&fiq->waitq.lock); __fuse_put_request(req); req->out.h.error = -EINTR; return; } spin_unlock(&fiq->waitq.lock); } /* * Either request is already in userspace, or it was forced. * Wait it out. 
*/ wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags)); } static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_iqueue *fiq = &fc->iq; BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); spin_lock(&fiq->waitq.lock); if (!fiq->connected) { spin_unlock(&fiq->waitq.lock); req->out.h.error = -ENOTCONN; } else { req->in.h.unique = fuse_get_unique(fiq); queue_request(fiq, req); /* acquire extra reference, since request is still needed after request_end() */ __fuse_get_request(req); spin_unlock(&fiq->waitq.lock); request_wait_answer(fc, req); /* Pairs with smp_wmb() in request_end() */ smp_rmb(); } } void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { __set_bit(FR_ISREPLY, &req->flags); if (!test_bit(FR_WAITING, &req->flags)) { __set_bit(FR_WAITING, &req->flags); atomic_inc(&fc->num_waiting); } __fuse_request_send(fc, req); } EXPORT_SYMBOL_GPL(fuse_request_send); static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) { if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; if (fc->minor < 9) { switch (args->in.h.opcode) { case FUSE_LOOKUP: case FUSE_CREATE: case FUSE_MKNOD: case FUSE_MKDIR: case FUSE_SYMLINK: case FUSE_LINK: args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; break; case FUSE_GETATTR: case FUSE_SETATTR: args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; break; } } if (fc->minor < 12) { switch (args->in.h.opcode) { case FUSE_CREATE: args->in.args[0].size = sizeof(struct fuse_open_in); break; case FUSE_MKNOD: args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; break; } } } ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) { struct fuse_req *req; ssize_t ret; req = fuse_get_req(fc, 0); if (IS_ERR(req)) return PTR_ERR(req); /* Needs to be done after fuse_get_req() so that fc->minor is valid */ fuse_adjust_compat(fc, args); req->in.h.opcode = args->in.h.opcode; req->in.h.nodeid = args->in.h.nodeid; 
req->in.numargs = args->in.numargs; memcpy(req->in.args, args->in.args, args->in.numargs * sizeof(struct fuse_in_arg)); req->out.argvar = args->out.argvar; req->out.numargs = args->out.numargs; memcpy(req->out.args, args->out.args, args->out.numargs * sizeof(struct fuse_arg)); fuse_request_send(fc, req); ret = req->out.h.error; if (!ret && args->out.argvar) { BUG_ON(args->out.numargs != 1); ret = req->out.args[0].size; } fuse_put_request(fc, req); return ret; } bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req) { bool queued = false; WARN_ON(!test_bit(FR_BACKGROUND, &req->flags)); if (!test_bit(FR_WAITING, &req->flags)) { __set_bit(FR_WAITING, &req->flags); atomic_inc(&fc->num_waiting); } __set_bit(FR_ISREPLY, &req->flags); spin_lock(&fc->bg_lock); if (likely(fc->connected)) { fc->num_background++; if (fc->num_background == fc->max_background) fc->blocked = 1; if (fc->num_background == fc->congestion_threshold && fc->sb) { set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); } list_add_tail(&req->list, &fc->bg_queue); flush_bg_queue(fc); queued = true; } spin_unlock(&fc->bg_lock); return queued; } void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) { WARN_ON(!req->end); if (!fuse_request_queue_background(fc, req)) { req->out.h.error = -ENOTCONN; req->end(fc, req); fuse_put_request(fc, req); } } EXPORT_SYMBOL_GPL(fuse_request_send_background); static int fuse_request_send_notify_reply(struct fuse_conn *fc, struct fuse_req *req, u64 unique) { int err = -ENODEV; struct fuse_iqueue *fiq = &fc->iq; __clear_bit(FR_ISREPLY, &req->flags); req->in.h.unique = unique; spin_lock(&fiq->waitq.lock); if (fiq->connected) { queue_request(fiq, req); err = 0; } spin_unlock(&fiq->waitq.lock); return err; } void fuse_force_forget(struct file *file, u64 nodeid) { struct inode *inode = file_inode(file); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_req *req; struct fuse_forget_in 
inarg; memset(&inarg, 0, sizeof(inarg)); inarg.nlookup = 1; req = fuse_get_req_nofail_nopages(fc, file); req->in.h.opcode = FUSE_FORGET; req->in.h.nodeid = nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; __clear_bit(FR_ISREPLY, &req->flags); __fuse_request_send(fc, req); /* ignore errors */ fuse_put_request(fc, req); } /* * Lock the request. Up to the next unlock_request() there mustn't be * anything that could cause a page-fault. If the request was already * aborted bail out. */ static int lock_request(struct fuse_req *req) { int err = 0; if (req) { spin_lock(&req->waitq.lock); if (test_bit(FR_ABORTED, &req->flags)) err = -ENOENT; else set_bit(FR_LOCKED, &req->flags); spin_unlock(&req->waitq.lock); } return err; } /* * Unlock request. If it was aborted while locked, caller is responsible * for unlocking and ending the request. */ static int unlock_request(struct fuse_req *req) { int err = 0; if (req) { spin_lock(&req->waitq.lock); if (test_bit(FR_ABORTED, &req->flags)) err = -ENOENT; else clear_bit(FR_LOCKED, &req->flags); spin_unlock(&req->waitq.lock); } return err; } struct fuse_copy_state { int write; struct fuse_req *req; struct iov_iter *iter; struct pipe_buffer *pipebufs; struct pipe_buffer *currbuf; struct pipe_inode_info *pipe; unsigned long nr_segs; struct page *pg; unsigned len; unsigned offset; unsigned move_pages:1; }; static void fuse_copy_init(struct fuse_copy_state *cs, int write, struct iov_iter *iter) { memset(cs, 0, sizeof(*cs)); cs->write = write; cs->iter = iter; } /* Unmap and put previous page of userspace buffer */ static void fuse_copy_finish(struct fuse_copy_state *cs) { if (cs->currbuf) { struct pipe_buffer *buf = cs->currbuf; if (cs->write) buf->len = PAGE_SIZE - cs->len; cs->currbuf = NULL; } else if (cs->pg) { if (cs->write) { flush_dcache_page(cs->pg); set_page_dirty_lock(cs->pg); } put_page(cs->pg); } cs->pg = NULL; } /* * Get another pagefull of userspace buffer, and map it to 
kernel * address space, and lock request */ static int fuse_copy_fill(struct fuse_copy_state *cs) { struct page *page; int err; err = unlock_request(cs->req); if (err) return err; fuse_copy_finish(cs); if (cs->pipebufs) { struct pipe_buffer *buf = cs->pipebufs; if (!cs->write) { err = pipe_buf_confirm(cs->pipe, buf); if (err) return err; BUG_ON(!cs->nr_segs); cs->currbuf = buf; cs->pg = buf->page; cs->offset = buf->offset; cs->len = buf->len; cs->pipebufs++; cs->nr_segs--; } else { if (cs->nr_segs == cs->pipe->buffers) return -EIO; page = alloc_page(GFP_HIGHUSER); if (!page) return -ENOMEM; buf->page = page; buf->offset = 0; buf->len = 0; cs->currbuf = buf; cs->pg = page; cs->offset = 0; cs->len = PAGE_SIZE; cs->pipebufs++; cs->nr_segs++; } } else { size_t off; err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off); if (err < 0) return err; BUG_ON(!err); cs->len = err; cs->offset = off; cs->pg = page; iov_iter_advance(cs->iter, err); } return lock_request(cs->req); } /* Do as much copy to/from userspace buffer as we can */ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) { unsigned ncpy = min(*size, cs->len); if (val) { void *pgaddr = kmap_atomic(cs->pg); void *buf = pgaddr + cs->offset; if (cs->write) memcpy(buf, *val, ncpy); else memcpy(*val, buf, ncpy); kunmap_atomic(pgaddr); *val += ncpy; } *size -= ncpy; cs->len -= ncpy; cs->offset += ncpy; return ncpy; } static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | 1 << PG_reclaim))) { printk(KERN_WARNING "fuse: trying to steal weird page\n"); printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); return 1; } return 0; } static int fuse_try_move_page(struct 
fuse_copy_state *cs, struct page **pagep) { int err; struct page *oldpage = *pagep; struct page *newpage; struct pipe_buffer *buf = cs->pipebufs; err = unlock_request(cs->req); if (err) return err; fuse_copy_finish(cs); err = pipe_buf_confirm(cs->pipe, buf); if (err) return err; BUG_ON(!cs->nr_segs); cs->currbuf = buf; cs->len = buf->len; cs->pipebufs++; cs->nr_segs--; if (cs->len != PAGE_SIZE) goto out_fallback; if (pipe_buf_steal(cs->pipe, buf) != 0) goto out_fallback; newpage = buf->page; if (!PageUptodate(newpage)) SetPageUptodate(newpage); ClearPageMappedToDisk(newpage); if (fuse_check_page(newpage) != 0) goto out_fallback_unlock; /* * This is a new and locked page, it shouldn't be mapped or * have any special flags on it */ if (WARN_ON(page_mapped(oldpage))) goto out_fallback_unlock; if (WARN_ON(page_has_private(oldpage))) goto out_fallback_unlock; if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage))) goto out_fallback_unlock; if (WARN_ON(PageMlocked(oldpage))) goto out_fallback_unlock; err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); if (err) { unlock_page(newpage); return err; } get_page(newpage); if (!(buf->flags & PIPE_BUF_FLAG_LRU)) lru_cache_add_file(newpage); err = 0; spin_lock(&cs->req->waitq.lock); if (test_bit(FR_ABORTED, &cs->req->flags)) err = -ENOENT; else *pagep = newpage; spin_unlock(&cs->req->waitq.lock); if (err) { unlock_page(newpage); put_page(newpage); return err; } unlock_page(oldpage); put_page(oldpage); cs->len = 0; return 0; out_fallback_unlock: unlock_page(newpage); out_fallback: cs->pg = buf->page; cs->offset = buf->offset; err = lock_request(cs->req); if (err) return err; return 1; } static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, unsigned offset, unsigned count) { struct pipe_buffer *buf; int err; if (cs->nr_segs == cs->pipe->buffers) return -EIO; err = unlock_request(cs->req); if (err) return err; fuse_copy_finish(cs); buf = cs->pipebufs; get_page(page); buf->page = page; buf->offset = 
offset; buf->len = count; cs->pipebufs++; cs->nr_segs++; cs->len = 0; return 0; } /* * Copy a page in the request to/from the userspace buffer. Must be * done atomically */ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, unsigned offset, unsigned count, int zeroing) { int err; struct page *page = *pagep; if (page && zeroing && count < PAGE_SIZE) clear_highpage(page); while (count) { if (cs->write && cs->pipebufs && page) { return fuse_ref_page(cs, page, offset, count); } else if (!cs->len) { if (cs->move_pages && page && offset == 0 && count == PAGE_SIZE) { err = fuse_try_move_page(cs, pagep); if (err <= 0) return err; } else { err = fuse_copy_fill(cs); if (err) return err; } } if (page) { void *mapaddr = kmap_atomic(page); void *buf = mapaddr + offset; offset += fuse_copy_do(cs, &buf, &count); kunmap_atomic(mapaddr); } else offset += fuse_copy_do(cs, NULL, &count); } if (page && !cs->write) flush_dcache_page(page); return 0; } /* Copy pages in the request to/from userspace buffer */ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, int zeroing) { unsigned i; struct fuse_req *req = cs->req; for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { int err; unsigned offset = req->page_descs[i].offset; unsigned count = min(nbytes, req->page_descs[i].length); err = fuse_copy_page(cs, &req->pages[i], offset, count, zeroing); if (err) return err; nbytes -= count; } return 0; } /* Copy a single argument in the request to/from userspace buffer */ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) { while (size) { if (!cs->len) { int err = fuse_copy_fill(cs); if (err) return err; } fuse_copy_do(cs, &val, &size); } return 0; } /* Copy request arguments to/from userspace buffer */ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, unsigned argpages, struct fuse_arg *args, int zeroing) { int err = 0; unsigned i; for (i = 0; !err && i < numargs; i++) { struct fuse_arg *arg = 
&args[i]; if (i == numargs - 1 && argpages) err = fuse_copy_pages(cs, arg->size, zeroing); else err = fuse_copy_one(cs, arg->value, arg->size); } return err; } static int forget_pending(struct fuse_iqueue *fiq) { return fiq->forget_list_head.next != NULL; } static int request_pending(struct fuse_iqueue *fiq) { return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || forget_pending(fiq); } /* * Transfer an interrupt request to userspace * * Unlike other requests this is assembled on demand, without a need * to allocate a separate fuse_req structure. * * Called with fiq->waitq.lock held, releases it */ static int fuse_read_interrupt(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes, struct fuse_req *req) __releases(fiq->waitq.lock) { struct fuse_in_header ih; struct fuse_interrupt_in arg; unsigned reqsize = sizeof(ih) + sizeof(arg); int err; list_del_init(&req->intr_entry); memset(&ih, 0, sizeof(ih)); memset(&arg, 0, sizeof(arg)); ih.len = reqsize; ih.opcode = FUSE_INTERRUPT; ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); arg.unique = req->in.h.unique; spin_unlock(&fiq->waitq.lock); if (nbytes < reqsize) return -EINVAL; err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); fuse_copy_finish(cs); return err ? 
err : reqsize; } static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq, unsigned max, unsigned *countp) { struct fuse_forget_link *head = fiq->forget_list_head.next; struct fuse_forget_link **newhead = &head; unsigned count; for (count = 0; *newhead != NULL && count < max; count++) newhead = &(*newhead)->next; fiq->forget_list_head.next = *newhead; *newhead = NULL; if (fiq->forget_list_head.next == NULL) fiq->forget_list_tail = &fiq->forget_list_head; if (countp != NULL) *countp = count; return head; } static int fuse_read_single_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) __releases(fiq->waitq.lock) { int err; struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL); struct fuse_forget_in arg = { .nlookup = forget->forget_one.nlookup, }; struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, .unique = fuse_get_unique(fiq), .len = sizeof(ih) + sizeof(arg), }; spin_unlock(&fiq->waitq.lock); kfree(forget); if (nbytes < ih.len) return -EINVAL; err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); fuse_copy_finish(cs); if (err) return err; return ih.len; } static int fuse_read_batch_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) __releases(fiq->waitq.lock) { int err; unsigned max_forgets; unsigned count; struct fuse_forget_link *head; struct fuse_batch_forget_in arg = { .count = 0 }; struct fuse_in_header ih = { .opcode = FUSE_BATCH_FORGET, .unique = fuse_get_unique(fiq), .len = sizeof(ih) + sizeof(arg), }; if (nbytes < ih.len) { spin_unlock(&fiq->waitq.lock); return -EINVAL; } max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); head = dequeue_forget(fiq, max_forgets, &count); spin_unlock(&fiq->waitq.lock); arg.count = count; ih.len += count * sizeof(struct fuse_forget_one); err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); while (head) { struct 
fuse_forget_link *forget = head; if (!err) { err = fuse_copy_one(cs, &forget->forget_one, sizeof(forget->forget_one)); } head = forget->next; kfree(forget); } fuse_copy_finish(cs); if (err) return err; return ih.len; } static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) __releases(fiq->waitq.lock) { if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) return fuse_read_single_forget(fiq, cs, nbytes); else return fuse_read_batch_forget(fiq, cs, nbytes); } /* * Read a single request into the userspace filesystem's buffer. This * function waits until a request is available, then removes it from * the pending list and copies request data to userspace buffer. If * no reply is needed (FORGET) or request has been aborted or there * was an error during the copying then it's finished by calling * request_end(). Otherwise add it to the processing list, and set * the 'sent' flag. */ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, struct fuse_copy_state *cs, size_t nbytes) { ssize_t err; struct fuse_conn *fc = fud->fc; struct fuse_iqueue *fiq = &fc->iq; struct fuse_pqueue *fpq = &fud->pq; struct fuse_req *req; struct fuse_in *in; unsigned reqsize; unsigned int hash; restart: spin_lock(&fiq->waitq.lock); err = -EAGAIN; if ((file->f_flags & O_NONBLOCK) && fiq->connected && !request_pending(fiq)) goto err_unlock; err = wait_event_interruptible_exclusive_locked(fiq->waitq, !fiq->connected || request_pending(fiq)); if (err) goto err_unlock; if (!fiq->connected) { err = fc->aborted ? 
-ECONNABORTED : -ENODEV; goto err_unlock; } if (!list_empty(&fiq->interrupts)) { req = list_entry(fiq->interrupts.next, struct fuse_req, intr_entry); return fuse_read_interrupt(fiq, cs, nbytes, req); } if (forget_pending(fiq)) { if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) return fuse_read_forget(fc, fiq, cs, nbytes); if (fiq->forget_batch <= -8) fiq->forget_batch = 16; } req = list_entry(fiq->pending.next, struct fuse_req, list); clear_bit(FR_PENDING, &req->flags); list_del_init(&req->list); spin_unlock(&fiq->waitq.lock); in = &req->in; reqsize = in->h.len; /* If request is too large, reply with an error and restart the read */ if (nbytes < reqsize) { req->out.h.error = -EIO; /* SETXATTR is special, since it may contain too large data */ if (in->h.opcode == FUSE_SETXATTR) req->out.h.error = -E2BIG; request_end(fc, req); goto restart; } spin_lock(&fpq->lock); list_add(&req->list, &fpq->io); spin_unlock(&fpq->lock); cs->req = req; err = fuse_copy_one(cs, &in->h, sizeof(in->h)); if (!err) err = fuse_copy_args(cs, in->numargs, in->argpages, (struct fuse_arg *) in->args, 0); fuse_copy_finish(cs); spin_lock(&fpq->lock); clear_bit(FR_LOCKED, &req->flags); if (!fpq->connected) { err = fc->aborted ? 
-ECONNABORTED : -ENODEV; goto out_end; } if (err) { req->out.h.error = -EIO; goto out_end; } if (!test_bit(FR_ISREPLY, &req->flags)) { err = reqsize; goto out_end; } hash = fuse_req_hash(req->in.h.unique); list_move_tail(&req->list, &fpq->processing[hash]); __fuse_get_request(req); set_bit(FR_SENT, &req->flags); spin_unlock(&fpq->lock); /* matches barrier in request_wait_answer() */ smp_mb__after_atomic(); if (test_bit(FR_INTERRUPTED, &req->flags)) queue_interrupt(fiq, req); fuse_put_request(fc, req); return reqsize; out_end: if (!test_bit(FR_PRIVATE, &req->flags)) list_del_init(&req->list); spin_unlock(&fpq->lock); request_end(fc, req); return err; err_unlock: spin_unlock(&fiq->waitq.lock); return err; } static int fuse_dev_open(struct inode *inode, struct file *file) { /* * The fuse device's file's private_data is used to hold * the fuse_conn(ection) when it is mounted, and is used to * keep track of whether the file has been mounted already. */ file->private_data = NULL; return 0; } static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to) { struct fuse_copy_state cs; struct file *file = iocb->ki_filp; struct fuse_dev *fud = fuse_get_dev(file); if (!fud) return -EPERM; if (!iter_is_iovec(to)) return -EINVAL; fuse_copy_init(&cs, 1, to); return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to)); } static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { int total, ret; int page_nr = 0; struct pipe_buffer *bufs; struct fuse_copy_state cs; struct fuse_dev *fud = fuse_get_dev(in); if (!fud) return -EPERM; bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; fuse_copy_init(&cs, 1, NULL); cs.pipebufs = bufs; cs.pipe = pipe; ret = fuse_dev_do_read(fud, in, &cs, len); if (ret < 0) goto out; if (pipe->nrbufs + cs.nr_segs > pipe->buffers) { ret = -EIO; goto out; } for (ret = total = 0; page_nr < cs.nr_segs; total += ret) { /* * Need to 
be careful about this. Having buf->ops in module * code can Oops if the buffer persists after module unload. */ bufs[page_nr].ops = &nosteal_pipe_buf_ops; bufs[page_nr].flags = 0; ret = add_to_pipe(pipe, &bufs[page_nr++]); if (unlikely(ret < 0)) break; } if (total) ret = total; out: for (; page_nr < cs.nr_segs; page_nr++) put_page(bufs[page_nr].page); kvfree(bufs); return ret; } static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_poll_wakeup_out outarg; int err = -EINVAL; if (size != sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; fuse_copy_finish(cs); return fuse_notify_poll_wakeup(fc, &outarg); err: fuse_copy_finish(cs); return err; } static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_inval_inode_out outarg; int err = -EINVAL; if (size != sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; fuse_copy_finish(cs); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) { err = fuse_reverse_inval_inode(fc->sb, outarg.ino, outarg.off, outarg.len); } up_read(&fc->killsb); return err; err: fuse_copy_finish(cs); return err; } static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_inval_entry_out outarg; int err = -ENOMEM; char *buf; struct qstr name; buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); if (!buf) goto err; err = -EINVAL; if (size < sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; err = -ENAMETOOLONG; if (outarg.namelen > FUSE_NAME_MAX) goto err; err = -EINVAL; if (size != sizeof(outarg) + outarg.namelen + 1) goto err; name.name = buf; name.len = outarg.namelen; err = fuse_copy_one(cs, buf, outarg.namelen + 1); if (err) goto err; fuse_copy_finish(cs); buf[outarg.namelen] = 0; down_read(&fc->killsb); err = -ENOENT; if (fc->sb) 
err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name); up_read(&fc->killsb); kfree(buf); return err; err: kfree(buf); fuse_copy_finish(cs); return err; } static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_delete_out outarg; int err = -ENOMEM; char *buf; struct qstr name; buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); if (!buf) goto err; err = -EINVAL; if (size < sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; err = -ENAMETOOLONG; if (outarg.namelen > FUSE_NAME_MAX) goto err; err = -EINVAL; if (size != sizeof(outarg) + outarg.namelen + 1) goto err; name.name = buf; name.len = outarg.namelen; err = fuse_copy_one(cs, buf, outarg.namelen + 1); if (err) goto err; fuse_copy_finish(cs); buf[outarg.namelen] = 0; down_read(&fc->killsb); err = -ENOENT; if (fc->sb) err = fuse_reverse_inval_entry(fc->sb, outarg.parent, outarg.child, &name); up_read(&fc->killsb); kfree(buf); return err; err: kfree(buf); fuse_copy_finish(cs); return err; } static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_store_out outarg; struct inode *inode; struct address_space *mapping; u64 nodeid; int err; pgoff_t index; unsigned int offset; unsigned int num; loff_t file_size; loff_t end; err = -EINVAL; if (size < sizeof(outarg)) goto out_finish; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto out_finish; err = -EINVAL; if (size - sizeof(outarg) != outarg.size) goto out_finish; nodeid = outarg.nodeid; down_read(&fc->killsb); err = -ENOENT; if (!fc->sb) goto out_up_killsb; inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) goto out_up_killsb; mapping = inode->i_mapping; index = outarg.offset >> PAGE_SHIFT; offset = outarg.offset & ~PAGE_MASK; file_size = i_size_read(inode); end = outarg.offset + outarg.size; if (end > file_size) { file_size = end; fuse_write_update_size(inode, 
file_size); } num = outarg.size; while (num) { struct page *page; unsigned int this_num; err = -ENOMEM; page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); if (!page) goto out_iput; this_num = min_t(unsigned, num, PAGE_SIZE - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); if (!err && offset == 0 && (this_num == PAGE_SIZE || file_size == end)) SetPageUptodate(page); unlock_page(page); put_page(page); if (err) goto out_iput; num -= this_num; offset = 0; index++; } err = 0; out_iput: iput(inode); out_up_killsb: up_read(&fc->killsb); out_finish: fuse_copy_finish(cs); return err; } static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) { release_pages(req->pages, req->num_pages); } static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, struct fuse_notify_retrieve_out *outarg) { int err; struct address_space *mapping = inode->i_mapping; struct fuse_req *req; pgoff_t index; loff_t file_size; unsigned int num; unsigned int offset; size_t total_len = 0; unsigned int num_pages; offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); num = outarg->size; if (outarg->offset > file_size) num = 0; else if (outarg->offset + num > file_size) num = file_size - outarg->offset; num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = min(num_pages, fc->max_pages); req = fuse_get_req(fc, num_pages); if (IS_ERR(req)) return PTR_ERR(req); req->in.h.opcode = FUSE_NOTIFY_REPLY; req->in.h.nodeid = outarg->nodeid; req->in.numargs = 2; req->in.argpages = 1; req->end = fuse_retrieve_end; index = outarg->offset >> PAGE_SHIFT; while (num && req->num_pages < num_pages) { struct page *page; unsigned int this_num; page = find_get_page(mapping, index); if (!page) break; this_num = min_t(unsigned, num, PAGE_SIZE - offset); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].offset = offset; req->page_descs[req->num_pages].length = this_num; req->num_pages++; offset = 0; num -= this_num; 
total_len += this_num; index++; } req->misc.retrieve_in.offset = outarg->offset; req->misc.retrieve_in.size = total_len; req->in.args[0].size = sizeof(req->misc.retrieve_in); req->in.args[0].value = &req->misc.retrieve_in; req->in.args[1].size = total_len; err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); if (err) { fuse_retrieve_end(fc, req); fuse_put_request(fc, req); } return err; } static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_retrieve_out outarg; struct inode *inode; int err; err = -EINVAL; if (size != sizeof(outarg)) goto copy_finish; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto copy_finish; fuse_copy_finish(cs); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) { u64 nodeid = outarg.nodeid; inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); if (inode) { err = fuse_retrieve(fc, inode, &outarg); iput(inode); } } up_read(&fc->killsb); return err; copy_finish: fuse_copy_finish(cs); return err; } static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { /* Don't try to move pages (yet) */ cs->move_pages = 0; switch (code) { case FUSE_NOTIFY_POLL: return fuse_notify_poll(fc, size, cs); case FUSE_NOTIFY_INVAL_INODE: return fuse_notify_inval_inode(fc, size, cs); case FUSE_NOTIFY_INVAL_ENTRY: return fuse_notify_inval_entry(fc, size, cs); case FUSE_NOTIFY_STORE: return fuse_notify_store(fc, size, cs); case FUSE_NOTIFY_RETRIEVE: return fuse_notify_retrieve(fc, size, cs); case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); default: fuse_copy_finish(cs); return -EINVAL; } } /* Look up request on processing list by unique ID */ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) { unsigned int hash = fuse_req_hash(unique); struct fuse_req *req; list_for_each_entry(req, &fpq->processing[hash], list) { if (req->in.h.unique == unique) return req; } return 
NULL; } static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, unsigned nbytes) { unsigned reqsize = sizeof(struct fuse_out_header); if (out->h.error) return nbytes != reqsize ? -EINVAL : 0; reqsize += len_args(out->numargs, out->args); if (reqsize < nbytes || (reqsize > nbytes && !out->argvar)) return -EINVAL; else if (reqsize > nbytes) { struct fuse_arg *lastarg = &out->args[out->numargs-1]; unsigned diffsize = reqsize - nbytes; if (diffsize > lastarg->size) return -EINVAL; lastarg->size -= diffsize; } return fuse_copy_args(cs, out->numargs, out->argpages, out->args, out->page_zeroing); } /* * Write a single reply to a request. First the header is copied from * the write buffer. The request is then searched on the processing * list by the unique ID found in the header. If found, then remove * it from the list and copy the rest of the buffer to the request. * The request is finished by calling request_end() */ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, struct fuse_copy_state *cs, size_t nbytes) { int err; struct fuse_conn *fc = fud->fc; struct fuse_pqueue *fpq = &fud->pq; struct fuse_req *req; struct fuse_out_header oh; err = -EINVAL; if (nbytes < sizeof(struct fuse_out_header)) goto out; err = fuse_copy_one(cs, &oh, sizeof(oh)); if (err) goto copy_finish; err = -EINVAL; if (oh.len != nbytes) goto copy_finish; /* * Zero oh.unique indicates unsolicited notification message * and error contains notification code. */ if (!oh.unique) { err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); goto out; } err = -EINVAL; if (oh.error <= -1000 || oh.error > 0) goto copy_finish; spin_lock(&fpq->lock); req = NULL; if (fpq->connected) req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); err = -ENOENT; if (!req) { spin_unlock(&fpq->lock); goto copy_finish; } /* Is it an interrupt reply ID? 
*/ if (oh.unique & FUSE_INT_REQ_BIT) { __fuse_get_request(req); spin_unlock(&fpq->lock); err = 0; if (nbytes != sizeof(struct fuse_out_header)) err = -EINVAL; else if (oh.error == -ENOSYS) fc->no_interrupt = 1; else if (oh.error == -EAGAIN) err = queue_interrupt(&fc->iq, req); fuse_put_request(fc, req); goto copy_finish; } clear_bit(FR_SENT, &req->flags); list_move(&req->list, &fpq->io); req->out.h = oh; set_bit(FR_LOCKED, &req->flags); spin_unlock(&fpq->lock); cs->req = req; if (!req->out.page_replace) cs->move_pages = 0; err = copy_out_args(cs, &req->out, nbytes); fuse_copy_finish(cs); spin_lock(&fpq->lock); clear_bit(FR_LOCKED, &req->flags); if (!fpq->connected) err = -ENOENT; else if (err) req->out.h.error = -EIO; if (!test_bit(FR_PRIVATE, &req->flags)) list_del_init(&req->list); spin_unlock(&fpq->lock); request_end(fc, req); out: return err ? err : nbytes; copy_finish: fuse_copy_finish(cs); goto out; } static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) { struct fuse_copy_state cs; struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp); if (!fud) return -EPERM; if (!iter_is_iovec(from)) return -EINVAL; fuse_copy_init(&cs, 0, from); return fuse_dev_do_write(fud, &cs, iov_iter_count(from)); } static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { unsigned nbuf; unsigned idx; struct pipe_buffer *bufs; struct fuse_copy_state cs; struct fuse_dev *fud; size_t rem; ssize_t ret; fud = fuse_get_dev(out); if (!fud) return -EPERM; pipe_lock(pipe); bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) { pipe_unlock(pipe); return -ENOMEM; } nbuf = 0; rem = 0; for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; ret = -EINVAL; if (rem < len) { pipe_unlock(pipe); goto out; } rem = len; while (rem) { struct pipe_buffer *ibuf; struct pipe_buffer *obuf; BUG_ON(nbuf >= pipe->buffers); 
BUG_ON(!pipe->nrbufs); ibuf = &pipe->bufs[pipe->curbuf]; obuf = &bufs[nbuf]; if (rem >= ibuf->len) { *obuf = *ibuf; ibuf->ops = NULL; pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; } else { pipe_buf_get(pipe, ibuf); *obuf = *ibuf; obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = rem; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } nbuf++; rem -= obuf->len; } pipe_unlock(pipe); fuse_copy_init(&cs, 0, NULL); cs.pipebufs = bufs; cs.nr_segs = nbuf; cs.pipe = pipe; if (flags & SPLICE_F_MOVE) cs.move_pages = 1; ret = fuse_dev_do_write(fud, &cs, len); pipe_lock(pipe); for (idx = 0; idx < nbuf; idx++) pipe_buf_release(pipe, &bufs[idx]); pipe_unlock(pipe); out: kvfree(bufs); return ret; } static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) { __poll_t mask = EPOLLOUT | EPOLLWRNORM; struct fuse_iqueue *fiq; struct fuse_dev *fud = fuse_get_dev(file); if (!fud) return EPOLLERR; fiq = &fud->fc->iq; poll_wait(file, &fiq->waitq, wait); spin_lock(&fiq->waitq.lock); if (!fiq->connected) mask = EPOLLERR; else if (request_pending(fiq)) mask |= EPOLLIN | EPOLLRDNORM; spin_unlock(&fiq->waitq.lock); return mask; } /* Abort all requests on the given list (pending or processing) */ static void end_requests(struct fuse_conn *fc, struct list_head *head) { while (!list_empty(head)) { struct fuse_req *req; req = list_entry(head->next, struct fuse_req, list); req->out.h.error = -ECONNABORTED; clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); request_end(fc, req); } } static void end_polls(struct fuse_conn *fc) { struct rb_node *p; p = rb_first(&fc->polled_files); while (p) { struct fuse_file *ff; ff = rb_entry(p, struct fuse_file, polled_node); wake_up_interruptible_all(&ff->poll_wait); p = rb_next(p); } } /* * Abort all requests. * * Emergency exit in case of a malicious or accidental deadlock, or just a hung * filesystem. * * The same effect is usually achievable through killing the filesystem daemon * and all users of the filesystem. 
The exception is the combination of an * asynchronous request and the tricky deadlock (see * Documentation/filesystems/fuse.txt). * * Aborting requests under I/O goes as follows: 1: Separate out unlocked * requests, they should be finished off immediately. Locked requests will be * finished after unlock; see unlock_request(). 2: Finish off the unlocked * requests. It is possible that some request will finish before we can. This * is OK, the request will in that case be removed from the list before we touch * it. */ void fuse_abort_conn(struct fuse_conn *fc) { struct fuse_iqueue *fiq = &fc->iq; spin_lock(&fc->lock); if (fc->connected) { struct fuse_dev *fud; struct fuse_req *req, *next; LIST_HEAD(to_end); unsigned int i; /* Background queuing checks fc->connected under bg_lock */ spin_lock(&fc->bg_lock); fc->connected = 0; spin_unlock(&fc->bg_lock); fuse_set_initialized(fc); list_for_each_entry(fud, &fc->devices, entry) { struct fuse_pqueue *fpq = &fud->pq; spin_lock(&fpq->lock); fpq->connected = 0; list_for_each_entry_safe(req, next, &fpq->io, list) { req->out.h.error = -ECONNABORTED; spin_lock(&req->waitq.lock); set_bit(FR_ABORTED, &req->flags); if (!test_bit(FR_LOCKED, &req->flags)) { set_bit(FR_PRIVATE, &req->flags); __fuse_get_request(req); list_move(&req->list, &to_end); } spin_unlock(&req->waitq.lock); } for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) list_splice_tail_init(&fpq->processing[i], &to_end); spin_unlock(&fpq->lock); } spin_lock(&fc->bg_lock); fc->blocked = 0; fc->max_background = UINT_MAX; flush_bg_queue(fc); spin_unlock(&fc->bg_lock); spin_lock(&fiq->waitq.lock); fiq->connected = 0; list_for_each_entry(req, &fiq->pending, list) clear_bit(FR_PENDING, &req->flags); list_splice_tail_init(&fiq->pending, &to_end); while (forget_pending(fiq)) kfree(dequeue_forget(fiq, 1, NULL)); wake_up_all_locked(&fiq->waitq); spin_unlock(&fiq->waitq.lock); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); end_polls(fc); wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); 
end_requests(fc, &to_end); } else { spin_unlock(&fc->lock); } } EXPORT_SYMBOL_GPL(fuse_abort_conn); void fuse_wait_aborted(struct fuse_conn *fc) { /* matches implicit memory barrier in fuse_drop_waiting() */ smp_mb(); wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); } int fuse_dev_release(struct inode *inode, struct file *file) { struct fuse_dev *fud = fuse_get_dev(file); if (fud) { struct fuse_conn *fc = fud->fc; struct fuse_pqueue *fpq = &fud->pq; LIST_HEAD(to_end); unsigned int i; spin_lock(&fpq->lock); WARN_ON(!list_empty(&fpq->io)); for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) list_splice_init(&fpq->processing[i], &to_end); spin_unlock(&fpq->lock); end_requests(fc, &to_end); /* Are we the last open device? */ if (atomic_dec_and_test(&fc->dev_count)) { WARN_ON(fc->iq.fasync != NULL); fuse_abort_conn(fc); } fuse_dev_free(fud); } return 0; } EXPORT_SYMBOL_GPL(fuse_dev_release); static int fuse_dev_fasync(int fd, struct file *file, int on) { struct fuse_dev *fud = fuse_get_dev(file); if (!fud) return -EPERM; /* No locking - fasync_helper does its own locking */ return fasync_helper(fd, file, on, &fud->fc->iq.fasync); } static int fuse_device_clone(struct fuse_conn *fc, struct file *new) { struct fuse_dev *fud; if (new->private_data) return -EINVAL; fud = fuse_dev_alloc(fc); if (!fud) return -ENOMEM; new->private_data = fud; atomic_inc(&fc->dev_count); return 0; } static long fuse_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = -ENOTTY; if (cmd == FUSE_DEV_IOC_CLONE) { int oldfd; err = -EFAULT; if (!get_user(oldfd, (__u32 __user *) arg)) { struct file *old = fget(oldfd); err = -EINVAL; if (old) { struct fuse_dev *fud = NULL; /* * Check against file->f_op because CUSE * uses the same ioctl handler. 
*/ if (old->f_op == file->f_op && old->f_cred->user_ns == file->f_cred->user_ns) fud = fuse_get_dev(old); if (fud) { mutex_lock(&fuse_mutex); err = fuse_device_clone(fud->fc, file); mutex_unlock(&fuse_mutex); } fput(old); } } } return err; } const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, .open = fuse_dev_open, .llseek = no_llseek, .read_iter = fuse_dev_read, .splice_read = fuse_dev_splice_read, .write_iter = fuse_dev_write, .splice_write = fuse_dev_splice_write, .poll = fuse_dev_poll, .release = fuse_dev_release, .fasync = fuse_dev_fasync, .unlocked_ioctl = fuse_dev_ioctl, .compat_ioctl = fuse_dev_ioctl, }; EXPORT_SYMBOL_GPL(fuse_dev_operations); static struct miscdevice fuse_miscdevice = { .minor = FUSE_MINOR, .name = "fuse", .fops = &fuse_dev_operations, }; int __init fuse_dev_init(void) { int err = -ENOMEM; fuse_req_cachep = kmem_cache_create("fuse_request", sizeof(struct fuse_req), 0, 0, NULL); if (!fuse_req_cachep) goto out; err = misc_register(&fuse_miscdevice); if (err) goto out_cache_clean; return 0; out_cache_clean: kmem_cache_destroy(fuse_req_cachep); out: return err; } void fuse_dev_cleanup(void) { misc_deregister(&fuse_miscdevice); kmem_cache_destroy(fuse_req_cachep); }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_820_0
crossvul-cpp_data_bad_4020_3
/* exif-mnote-data-pentax.c
 *
 * Copyright (c) 2002, 2003 Lutz Mueller <lutz@users.sourceforge.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301  USA.
 */

#include "config.h"
#include "exif-mnote-data-pentax.h"

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#include <libexif/exif-byte-order.h>
#include <libexif/exif-utils.h>

/* True when reading @structsize bytes at @offset would run past @datasize
 * (written so none of the arithmetic can wrap around). */
#define CHECKOVERFLOW(offset,datasize,structsize) (( offset >= datasize) || (structsize > datasize) || (offset > datasize - structsize ))

/* Release every entry (and its payload) held by @n; leaves @n reusable. */
static void
exif_mnote_data_pentax_clear (ExifMnoteDataPentax *n)
{
	ExifMnoteData *d = (ExifMnoteData *) n;
	unsigned int i;

	if (!n)
		return;
	if (!n->entries)
		return;

	for (i = 0; i < n->count; i++) {
		if (n->entries[i].data) {
			exif_mem_free (d->mem, n->entries[i].data);
			n->entries[i].data = NULL;
		}
	}
	exif_mem_free (d->mem, n->entries);
	n->entries = NULL;
	n->count = 0;
}

/* "free" method of the MakerNote vtable: just clears the entry table. */
static void
exif_mnote_data_pentax_free (ExifMnoteData *n)
{
	if (n)
		exif_mnote_data_pentax_clear ((ExifMnoteDataPentax *) n);
}

/* Format entry @i into @val (at most @maxlen bytes); NULL when out of
 * range or @d is NULL. */
static char *
exif_mnote_data_pentax_get_value (ExifMnoteData *d, unsigned int i, char *val, unsigned int maxlen)
{
	ExifMnoteDataPentax *n = (ExifMnoteDataPentax *) d;

	if (!n || (i >= n->count))
		return NULL;
	return mnote_pentax_entry_get_value (&n->entries[i], val, maxlen);
}

/**
 * @brief save the MnoteData from ne to buf
 *
 * @param ne extract the data from this structure
 * @param *buf write the mnoteData to this buffer (buffer will be allocated)
 * @param buf_size the final size of the buffer
 */
static void
exif_mnote_data_pentax_save (ExifMnoteData *ne,
		unsigned char **buf, unsigned int *buf_size)
{
	ExifMnoteDataPentax *n = (ExifMnoteDataPentax *) ne;
	size_t i,
	       datao,
	       base = 0,	/* internal MakerNote tag number offset */
	       o2 = 4 + 2;	/* offset to first tag entry, past header */

	if (!n || !buf || !buf_size)
		return;
	datao = n->offset; /* this MakerNote style uses offsets
			      based on main IFD, not makernote IFD */

	/*
	 * Allocate enough memory for header, the number of entries, entries,
	 * and next IFD pointer
	 */
	*buf_size = o2 + 2 + n->count * 12 + 4;
	switch (n->version) {
	case casioV2:
		base = MNOTE_PENTAX2_TAG_BASE;
		*buf = exif_mem_alloc (ne->mem, *buf_size);
		if (!*buf) {
			EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataPentax", *buf_size);
			return;
		}
		/* Write the magic header */
		strcpy ((char *)*buf, "QVC");
		exif_set_short (*buf + 4, n->order, (ExifShort) 0);
		break;

	case pentaxV3:
		base = MNOTE_PENTAX2_TAG_BASE;
		*buf = exif_mem_alloc (ne->mem, *buf_size);
		if (!*buf) {
			EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataPentax", *buf_size);
			return;
		}
		/* Write the magic header */
		strcpy ((char *)*buf, "AOC");
		exif_set_short (*buf + 4, n->order, (ExifShort) (
			(n->order == EXIF_BYTE_ORDER_INTEL) ?
('I' << 8) | 'I' : ('M' << 8) | 'M')); break; case pentaxV2: base = MNOTE_PENTAX2_TAG_BASE; *buf = exif_mem_alloc (ne->mem, *buf_size); if (!*buf) { EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataPentax", *buf_size); return; } /* Write the magic header */ strcpy ((char *)*buf, "AOC"); exif_set_short (*buf + 4, n->order, (ExifShort) 0); break; case pentaxV1: /* It looks like this format doesn't have a magic header as * such, just has a fixed number of entries equal to 0x001b */ *buf_size -= 6; o2 -= 6; *buf = exif_mem_alloc (ne->mem, *buf_size); if (!*buf) { EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataPentax", *buf_size); return; } break; default: /* internal error */ return; } /* Write the number of entries. */ exif_set_short (*buf + o2, n->order, (ExifShort) n->count); o2 += 2; /* Save each entry */ for (i = 0; i < n->count; i++) { size_t doff; /* offset to current data portion of tag */ size_t s; unsigned char *t; size_t o = o2 + i * 12; /* current offset into output buffer */ exif_set_short (*buf + o + 0, n->order, (ExifShort) (n->entries[i].tag - base)); exif_set_short (*buf + o + 2, n->order, (ExifShort) n->entries[i].format); exif_set_long (*buf + o + 4, n->order, n->entries[i].components); o += 8; s = exif_format_get_size (n->entries[i].format) * n->entries[i].components; if (s > 65536) { /* Corrupt data: EXIF data size is limited to the * maximum size of a JPEG segment (64 kb). */ continue; } if (s > 4) { size_t ts = *buf_size + s; doff = *buf_size; t = exif_mem_realloc (ne->mem, *buf, sizeof (char) * ts); if (!t) { EXIF_LOG_NO_MEMORY(ne->log, "ExifMnoteDataPentax", ts); return; } *buf = t; *buf_size = ts; exif_set_long (*buf + o, n->order, datao + doff); } else doff = o; /* Write the data. 
*/ if (n->entries[i].data) { memcpy (*buf + doff, n->entries[i].data, s); } else { /* Most certainly damaged input file */ memset (*buf + doff, 0, s); } } /* Sanity check the buffer size */ if (*buf_size < (o2 + n->count * 12 + 4)) { exif_log (ne->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteDataPentax", "Buffer overflow"); } /* Reset next IFD pointer */ exif_set_long (*buf + o2 + n->count * 12, n->order, 0); } static void exif_mnote_data_pentax_load (ExifMnoteData *en, const unsigned char *buf, unsigned int buf_size) { ExifMnoteDataPentax *n = (ExifMnoteDataPentax *) en; size_t i, tcount, o, datao, base = 0; ExifShort c; if (!n || !buf || !buf_size) { exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteDataPentax", "Short MakerNote"); return; } datao = 6 + n->offset; if (CHECKOVERFLOW(datao, buf_size, 8)) { exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteDataPentax", "Short MakerNote"); return; } /* Detect variant of Pentax/Casio MakerNote found */ if (!memcmp(buf + datao, "AOC", 4)) { if ((buf[datao + 4] == 'I') && (buf[datao + 5] == 'I')) { n->version = pentaxV3; n->order = EXIF_BYTE_ORDER_INTEL; } else if ((buf[datao + 4] == 'M') && (buf[datao + 5] == 'M')) { n->version = pentaxV3; n->order = EXIF_BYTE_ORDER_MOTOROLA; } else { /* Uses Casio v2 tags */ n->version = pentaxV2; } exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataPentax", "Parsing Pentax maker note v%d...", (int)n->version); datao += 4 + 2; base = MNOTE_PENTAX2_TAG_BASE; } else if (!memcmp(buf + datao, "QVC", 4)) { exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataPentax", "Parsing Casio maker note v2..."); n->version = casioV2; base = MNOTE_CASIO2_TAG_BASE; datao += 4 + 2; } else { /* probably assert(!memcmp(buf + datao, "\x00\x1b", 2)) */ exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataPentax", "Parsing Pentax maker note v1..."); n->version = pentaxV1; } /* Read the number of tags */ c = exif_get_short (buf + datao, n->order); datao += 2; /* Remove any old entries */ 
exif_mnote_data_pentax_clear (n); /* Reserve enough space for all the possible MakerNote tags */ n->entries = exif_mem_alloc (en->mem, sizeof (MnotePentaxEntry) * c); if (!n->entries) { EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteDataPentax", sizeof (MnotePentaxEntry) * c); return; } /* Parse all c entries, storing ones that are successfully parsed */ tcount = 0; for (i = c, o = datao; i; --i, o += 12) { size_t s; if (CHECKOVERFLOW(o,buf_size,12)) { exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteDataPentax", "Short MakerNote"); break; } n->entries[tcount].tag = exif_get_short (buf + o + 0, n->order) + base; n->entries[tcount].format = exif_get_short (buf + o + 2, n->order); n->entries[tcount].components = exif_get_long (buf + o + 4, n->order); n->entries[tcount].order = n->order; exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnotePentax", "Loading entry 0x%x ('%s')...", n->entries[tcount].tag, mnote_pentax_tag_get_name (n->entries[tcount].tag)); /* Check if we overflow the multiplication. Use buf_size as the max size for integer overflow detection, * we will check the buffer sizes closer later. */ if ( exif_format_get_size (n->entries[tcount].format) && buf_size / exif_format_get_size (n->entries[tcount].format) < n->entries[tcount].components ) { exif_log (en->log, EXIF_LOG_CODE_CORRUPT_DATA, "ExifMnoteDataPentax", "Tag size overflow detected (%u * %lu)", exif_format_get_size (n->entries[tcount].format), n->entries[tcount].components); break; } /* * Size? If bigger than 4 bytes, the actual data is not * in the entry but somewhere else (offset). 
*/ s = exif_format_get_size (n->entries[tcount].format) * n->entries[tcount].components; n->entries[tcount].size = s; if (s) { size_t dataofs = o + 8; if (s > 4) /* The data in this case is merely a pointer */ dataofs = exif_get_long (buf + dataofs, n->order) + 6; if (CHECKOVERFLOW(dataofs, buf_size, s)) { exif_log (en->log, EXIF_LOG_CODE_DEBUG, "ExifMnoteDataPentax", "Tag data past end " "of buffer (%u > %u)", (unsigned)(dataofs + s), buf_size); continue; } n->entries[tcount].data = exif_mem_alloc (en->mem, s); if (!n->entries[tcount].data) { EXIF_LOG_NO_MEMORY(en->log, "ExifMnoteDataPentax", s); continue; } memcpy (n->entries[tcount].data, buf + dataofs, s); } /* Tag was successfully parsed */ ++tcount; } /* Store the count of successfully parsed tags */ n->count = tcount; } static unsigned int exif_mnote_data_pentax_count (ExifMnoteData *n) { return n ? ((ExifMnoteDataPentax *) n)->count : 0; } static unsigned int exif_mnote_data_pentax_get_id (ExifMnoteData *d, unsigned int n) { ExifMnoteDataPentax *note = (ExifMnoteDataPentax *) d; if (!note) return 0; if (note->count <= n) return 0; return note->entries[n].tag; } static const char * exif_mnote_data_pentax_get_name (ExifMnoteData *d, unsigned int n) { ExifMnoteDataPentax *note = (ExifMnoteDataPentax *) d; if (!note) return NULL; if (note->count <= n) return NULL; return mnote_pentax_tag_get_name (note->entries[n].tag); } static const char * exif_mnote_data_pentax_get_title (ExifMnoteData *d, unsigned int n) { ExifMnoteDataPentax *note = (ExifMnoteDataPentax *) d; if (!note) return NULL; if (note->count <= n) return NULL; return mnote_pentax_tag_get_title (note->entries[n].tag); } static const char * exif_mnote_data_pentax_get_description (ExifMnoteData *d, unsigned int n) { ExifMnoteDataPentax *note = (ExifMnoteDataPentax *) d; if (!note) return NULL; if (note->count <= n) return NULL; return mnote_pentax_tag_get_description (note->entries[n].tag); } static void exif_mnote_data_pentax_set_offset (ExifMnoteData 
*d, unsigned int o) { if (d) ((ExifMnoteDataPentax *) d)->offset = o; } static void exif_mnote_data_pentax_set_byte_order (ExifMnoteData *d, ExifByteOrder o) { ExifByteOrder o_orig; ExifMnoteDataPentax *n = (ExifMnoteDataPentax *) d; unsigned int i; if (!n) return; o_orig = n->order; n->order = o; for (i = 0; i < n->count; i++) { if (n->entries[i].components && (n->entries[i].size/n->entries[i].components < exif_format_get_size (n->entries[i].format))) continue; n->entries[i].order = o; exif_array_set_byte_order (n->entries[i].format, n->entries[i].data, n->entries[i].components, o_orig, o); } } int exif_mnote_data_pentax_identify (const ExifData *ed, const ExifEntry *e) { (void) ed; /* unused */ if ((e->size >= 8) && !memcmp (e->data, "AOC", 4)) { if (((e->data[4] == 'I') && (e->data[5] == 'I')) || ((e->data[4] == 'M') && (e->data[5] == 'M'))) return pentaxV3; else /* Uses Casio v2 tags */ return pentaxV2; } if ((e->size >= 8) && !memcmp (e->data, "QVC", 4)) return casioV2; /* This isn't a very robust test, so make sure it's done last */ /* Maybe we should additionally check for a make of Asahi or Pentax */ if ((e->size >= 2) && (e->data[0] == 0x00) && (e->data[1] == 0x1b)) return pentaxV1; return 0; } ExifMnoteData * exif_mnote_data_pentax_new (ExifMem *mem) { ExifMnoteData *d; if (!mem) return NULL; d = exif_mem_alloc (mem, sizeof (ExifMnoteDataPentax)); if (!d) return NULL; exif_mnote_data_construct (d, mem); /* Set up function pointers */ d->methods.free = exif_mnote_data_pentax_free; d->methods.set_byte_order = exif_mnote_data_pentax_set_byte_order; d->methods.set_offset = exif_mnote_data_pentax_set_offset; d->methods.load = exif_mnote_data_pentax_load; d->methods.save = exif_mnote_data_pentax_save; d->methods.count = exif_mnote_data_pentax_count; d->methods.get_id = exif_mnote_data_pentax_get_id; d->methods.get_name = exif_mnote_data_pentax_get_name; d->methods.get_title = exif_mnote_data_pentax_get_title; d->methods.get_description = 
exif_mnote_data_pentax_get_description; d->methods.get_value = exif_mnote_data_pentax_get_value; return d; }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_4020_3
crossvul-cpp_data_good_4655_0
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <linux/hash.h> #include <linux/bitops.h> #include <linux/init_task.h> #include <linux/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. 
* * Note that the symlink resolution is not *completely* iterative. * There is still a significant amount of tail- and mid- recursion in * the algorithm. Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. 
This is the last surviving part of old 4.2BSD bug. * During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname)) struct filename * getname_flags(const char __user *filename, int flags, int *empty) { struct filename *result; char *kname; int len; result = audit_reusename(filename); if (result) return result; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); /* * First, try to embed the struct filename inside the names_cache * allocation */ kname = (char *)result->iname; result->name = kname; len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); if (unlikely(len < 0)) { __putname(result); return ERR_PTR(len); } /* * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a * separate struct filename so we can dedicate the entire * names_cache allocation for the pathname, and re-do the copy from * userland. */ if (unlikely(len == EMBEDDED_NAME_MAX)) { const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; /* * size is chosen that way we to guarantee that * result->iname[0] is within the same object and that * kname can't be equal to result->iname, no matter what. 
*/ result = kzalloc(size, GFP_KERNEL); if (unlikely(!result)) { __putname(kname); return ERR_PTR(-ENOMEM); } result->name = kname; len = strncpy_from_user(kname, filename, PATH_MAX); if (unlikely(len < 0)) { __putname(kname); kfree(result); return ERR_PTR(len); } if (unlikely(len == PATH_MAX)) { __putname(kname); kfree(result); return ERR_PTR(-ENAMETOOLONG); } } result->refcnt = 1; /* The empty path is special. */ if (unlikely(!len)) { if (empty) *empty = 1; if (!(flags & LOOKUP_EMPTY)) { putname(result); return ERR_PTR(-ENOENT); } } result->uptr = filename; result->aname = NULL; audit_getname(result); return result; } struct filename * getname(const char __user * filename) { return getname_flags(filename, 0, NULL); } struct filename * getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { const size_t size = offsetof(struct filename, iname[1]); struct filename *tmp; tmp = kmalloc(size, GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; } void putname(struct filename *name) { BUG_ON(name->refcnt <= 0); if (--name->refcnt > 0) return; if (name->name != name->iname) { __putname(name->name); kfree(name); } else __putname(name); } static int check_acl(struct inode *inode, int mask) { #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *acl; if (mask & MAY_NOT_BLOCK) { acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... 
*/ if (is_uncached_acl(acl)) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } acl = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } #endif return -EAGAIN; } /* * This does the basic permission checking */ static int acl_permission_check(struct inode *inode, int mask) { unsigned int mode = inode->i_mode; if (likely(uid_eq(current_fsuid(), inode->i_uid))) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask) { int ret; /* * Do the basic permission checks. */ ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ if (!(mask & MAY_WRITE)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; return -EACCES; } /* * Searching includes executable on directories, else just read. 
*/ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; /* * Read/write DACs are always overridable. * Executable DACs are overridable when there is * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; return -EACCES; } EXPORT_SYMBOL(generic_permission); /* * We _really_ want to just do "generic_permission()" without * even looking at the inode->i_op values. So we keep a cache * flag in inode->i_opflags, that says "this has not special * permission function, use the fast case". */ static inline int do_inode_permission(struct inode *inode, int mask) { if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { if (likely(inode->i_op->permission)) return inode->i_op->permission(inode, mask); /* This gets set once for the inode lifetime */ spin_lock(&inode->i_lock); inode->i_opflags |= IOP_FASTPERM; spin_unlock(&inode->i_lock); } return generic_permission(inode, mask); } /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. */ static int sb_permission(struct super_block *sb, struct inode *inode, int mask) { if (unlikely(mask & MAY_WRITE)) { umode_t mode = inode->i_mode; /* Nobody gets write access to a read-only fs. */ if (sb_rdonly(sb) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; } return 0; } /** * inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. 
We use fs[ug]id for * this, letting us set arbitrary permissions for filesystem access without * changing the "normal" UIDs which are used for other things. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. */ int inode_permission(struct inode *inode, int mask) { int retval; retval = sb_permission(inode->i_sb, inode, mask); if (retval) return retval; if (unlikely(mask & MAY_WRITE)) { /* * Nobody gets write access to an immutable file. */ if (IS_IMMUTABLE(inode)) return -EPERM; /* * Updating mtime will likely cause i_uid and i_gid to be * written back improperly if their true value is unknown * to the vfs. */ if (HAS_UNMAPPED_ID(inode)) return -EACCES; } retval = do_inode_permission(inode, mask); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } EXPORT_SYMBOL(inode_permission); /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. */ void path_get(const struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. 
*/ void path_put(const struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); #define EMBEDDED_LEVELS 2 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq, m_seq; int last_type; unsigned depth; int total_link_count; struct saved { struct path link; struct delayed_call done; const char *name; unsigned seq; } *stack, internal[EMBEDDED_LEVELS]; struct filename *name; struct nameidata *saved; struct inode *link_inode; unsigned root_seq; int dfd; } __randomize_layout; static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) { struct nameidata *old = current->nameidata; p->stack = p->internal; p->dfd = dfd; p->name = name; p->total_link_count = old ? old->total_link_count : 0; p->saved = old; current->nameidata = p; } static void restore_nameidata(void) { struct nameidata *now = current->nameidata, *old = now->saved; current->nameidata = old; if (old) old->total_link_count = now->total_link_count; if (now->stack != now->internal) kfree(now->stack); } static int __nd_alloc_stack(struct nameidata *nd) { struct saved *p; if (nd->flags & LOOKUP_RCU) { p= kmalloc_array(MAXSYMLINKS, sizeof(struct saved), GFP_ATOMIC); if (unlikely(!p)) return -ECHILD; } else { p= kmalloc_array(MAXSYMLINKS, sizeof(struct saved), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; } memcpy(p, nd->internal, sizeof(nd->internal)); nd->stack = p; return 0; } /** * path_connected - Verify that a path->dentry is below path->mnt.mnt_root * @path: nameidate to verify * * Rename can sometimes move a file or directory outside of a bind * mount, path_connected allows those cases to be detected. 
*/ static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; struct super_block *sb = mnt->mnt_sb; /* Bind mounts and multi-root filesystems can have disconnected paths */ if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root)) return true; return is_subdir(path->dentry, mnt->mnt_root); } static inline int nd_alloc_stack(struct nameidata *nd) { if (likely(nd->depth != EMBEDDED_LEVELS)) return 0; if (likely(nd->stack != nd->internal)) return 0; return __nd_alloc_stack(nd); } static void drop_links(struct nameidata *nd) { int i = nd->depth; while (i--) { struct saved *last = nd->stack + i; do_delayed_call(&last->done); clear_delayed_call(&last->done); } } static void terminate_walk(struct nameidata *nd) { drop_links(nd); if (!(nd->flags & LOOKUP_RCU)) { int i; path_put(&nd->path); for (i = 0; i < nd->depth; i++) path_put(&nd->stack[i].link); if (nd->flags & LOOKUP_ROOT_GRABBED) { path_put(&nd->root); nd->flags &= ~LOOKUP_ROOT_GRABBED; } } else { nd->flags &= ~LOOKUP_RCU; rcu_read_unlock(); } nd->depth = 0; } /* path_put is needed afterwards regardless of success or failure */ static bool legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) { int res = __legitimize_mnt(path->mnt, nd->m_seq); if (unlikely(res)) { if (res > 0) path->mnt = NULL; path->dentry = NULL; return false; } if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { path->dentry = NULL; return false; } return !read_seqcount_retry(&path->dentry->d_seq, seq); } static bool legitimize_links(struct nameidata *nd) { int i; for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { drop_links(nd); nd->depth = i + 1; return false; } } return true; } static bool legitimize_root(struct nameidata *nd) { if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT)) return true; nd->flags |= LOOKUP_ROOT_GRABBED; return legitimize_path(nd, &nd->root, nd->root_seq); } /* * Path walking 
has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). In situations when we can't * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab * normal reference counts on dentries and vfsmounts to transition to ref-walk * mode. Refcounts are grabbed at the last known good point before rcu-walk * got stuck, so ref-walk may continue from there. If this is not successful * (eg. a seqcount has changed), then failure is returned and it's up to caller * to restart the path walk from the beginning in ref-walk mode. */ /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path and nd->root * for ref-walk mode. * Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ static int unlazy_walk(struct nameidata *nd) { struct dentry *parent = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out1; if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) goto out; if (unlikely(!legitimize_root(nd))) goto out; rcu_read_unlock(); BUG_ON(nd->inode != parent->d_inode); return 0; out1: nd->path.mnt = NULL; nd->path.dentry = NULL; out: rcu_read_unlock(); return -ECHILD; } /** * unlazy_child - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd. Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_child() failure and * terminate_walk(). 
*/ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) { BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref))) goto out1; /* * We need to move both the parent and the dentry from the RCU domain * to be properly refcounted. And the sequence number in the dentry * validates *both* dentry counters, since we checked the sequence * number of the parent after we got the child sequence number. So we * know the parent must still be valid if the child sequence number is */ if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) goto out; if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) goto out_dput; /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. */ if (unlikely(!legitimize_root(nd))) goto out_dput; rcu_read_unlock(); return 0; out2: nd->path.mnt = NULL; out1: nd->path.dentry = NULL; out: rcu_read_unlock(); return -ECHILD; out_dput: rcu_read_unlock(); dput(dentry); return -ECHILD; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) return dentry->d_op->d_revalidate(dentry, flags); else return 1; } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. 
*/ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; if (unlikely(unlazy_walk(nd))) return -ECHILD; } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) return 0; status = dentry->d_op->d_weak_revalidate(dentry, nd->flags); if (status > 0) return 0; if (!status) status = -ESTALE; return status; } static void set_root(struct nameidata *nd) { struct fs_struct *fs = current->fs; if (nd->flags & LOOKUP_RCU) { unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_root(fs, &nd->root); nd->flags |= LOOKUP_ROOT_GRABBED; } } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static int nd_jump_root(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { struct dentry *d; nd->path = nd->root; d = nd->path.dentry; nd->inode = d->d_inode; nd->seq = nd->root_seq; if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq))) return -ECHILD; } else { path_put(&nd->path); nd->path = nd->root; path_get(&nd->path); nd->inode = nd->path.dentry->d_inode; } nd->flags |= LOOKUP_JUMPED; return 0; } /* * Helper to directly jump to a known parsed path from ->get_link, * caller must have taken a reference to path beforehand. 
*/ void nd_jump_link(struct path *path) { struct nameidata *nd = current->nameidata; path_put(&nd->path); nd->path = *path; nd->inode = nd->path.dentry->d_inode; nd->flags |= LOOKUP_JUMPED; } static inline void put_link(struct nameidata *nd) { struct saved *last = nd->stack + --nd->depth; do_delayed_call(&last->done); if (!(nd->flags & LOOKUP_RCU)) path_put(&last->link); } int sysctl_protected_symlinks __read_mostly = 0; int sysctl_protected_hardlinks __read_mostly = 0; int sysctl_protected_fifos __read_mostly; int sysctl_protected_regular __read_mostly; /** * may_follow_link - Check symlink following for unsafe situations * @nd: nameidata pathwalk data * * In the case of the sysctl_protected_symlinks sysctl being enabled, * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is * in a sticky world-writable directory. This is to protect privileged * processes from failing races against path names that may change out * from under them by way of other users creating malicious symlinks. * It will permit symlinks to be followed only when outside a sticky * world-writable directory, or when the uid of the symlink and follower * match, or when the directory owner matches the symlink's owner. * * Returns 0 if following the symlink is allowed, -ve on error. */ static inline int may_follow_link(struct nameidata *nd) { const struct inode *inode; const struct inode *parent; kuid_t puid; if (!sysctl_protected_symlinks) return 0; /* Allowed if owner and follower match. */ inode = nd->link_inode; if (uid_eq(current_cred()->fsuid, inode->i_uid)) return 0; /* Allowed if parent directory not sticky and world-writable. */ parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; /* Allowed if parent directory and link owner match. 
*/ puid = parent->i_uid; if (uid_valid(puid) && uid_eq(puid, inode->i_uid)) return 0; if (nd->flags & LOOKUP_RCU) return -ECHILD; audit_inode(nd->name, nd->stack[0].link.dentry, 0); audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link"); return -EACCES; } /** * safe_hardlink_source - Check for safe hardlink conditions * @inode: the source inode to hardlink from * * Return false if at least one of the following conditions: * - inode is not a regular file * - inode is setuid * - inode is setgid and group-exec * - access failure for read and write * * Otherwise returns true. */ static bool safe_hardlink_source(struct inode *inode) { umode_t mode = inode->i_mode; /* Special files should not get pinned to the filesystem. */ if (!S_ISREG(mode)) return false; /* Setuid files should not get pinned to the filesystem. */ if (mode & S_ISUID) return false; /* Executable setgid files should not get pinned to the filesystem. */ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) return false; /* Hardlinking to unreadable or unwritable sources is dangerous. */ if (inode_permission(inode, MAY_READ | MAY_WRITE)) return false; return true; } /** * may_linkat - Check permissions for creating a hardlink * @link: the source to hardlink from * * Block hardlink when all of: * - sysctl_protected_hardlinks enabled * - fsuid does not match inode * - hardlink source is unsafe (see safe_hardlink_source() above) * - not CAP_FOWNER in a namespace with the inode owner uid mapped * * Returns 0 if successful, -ve on error. */ static int may_linkat(struct path *link) { struct inode *inode = link->dentry->d_inode; /* Inode writeback is not safe when the uid or gid are invalid. */ if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid)) return -EOVERFLOW; if (!sysctl_protected_hardlinks) return 0; /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. 
	 */
	if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
		return 0;

	audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
	return -EPERM;
}

/**
 * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
 *			  should be allowed, or not, on files that already
 *			  exist.
 * @dir_mode: mode bits of directory
 * @dir_uid: owner of directory
 * @inode: the inode of the file to open
 *
 * Block an O_CREAT open of a FIFO (or a regular file) when:
 *   - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
 *   - the file already exists
 *   - we are in a sticky directory
 *   - we don't own the file
 *   - the owner of the directory doesn't own the file
 *   - the directory is world writable
 * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
 * the directory doesn't have to be world writable: being group writable will
 * be enough.
 *
 * Returns 0 if the open is allowed, -ve on error.
 */
static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
				struct inode * const inode)
{
	/* Fast exits: protection disabled for this file type, directory not
	 * sticky, or the opener/directory owner already owns the file. */
	if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
	    (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
	    likely(!(dir_mode & S_ISVTX)) ||
	    uid_eq(inode->i_uid, dir_uid) ||
	    uid_eq(current_fsuid(), inode->i_uid))
		return 0;

	/* World-writable always trips the check; group-writable only does so
	 * at protection level >= 2 for the matching file type. */
	if (likely(dir_mode & 0002) ||
	    (dir_mode & 0020 &&
	     ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
	      (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
		const char *operation = S_ISFIFO(inode->i_mode) ?
					"sticky_create_fifo" :
					"sticky_create_regular";
		audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
		return -EACCES;
	}
	return 0;
}

/*
 * Fetch the body of the symlink on top of nd->stack, or an ERR_PTR on
 * failure.  Handles atime update (dropping out of RCU mode if the
 * filesystem needs it), the LSM follow_link hook, the cached ->i_link
 * fast path vs. calling ->get_link(), and jumping to the root for
 * absolute targets.  Returns NULL when nothing is left to walk.
 */
static __always_inline
const char *get_link(struct nameidata *nd)
{
	struct saved *last = nd->stack + nd->depth - 1;
	struct dentry *dentry = last->link.dentry;
	struct inode *inode = nd->link_inode;
	int error;
	const char *res;

	if (!(nd->flags & LOOKUP_RCU)) {
		touch_atime(&last->link);
		cond_resched();
	} else if (atime_needs_update(&last->link, inode)) {
		/* atime write needs references; fall back to ref-walk. */
		if (unlikely(unlazy_walk(nd)))
			return ERR_PTR(-ECHILD);
		touch_atime(&last->link);
	}

	error = security_inode_follow_link(dentry, inode,
					   nd->flags & LOOKUP_RCU);
	if (unlikely(error))
		return ERR_PTR(error);

	nd->last_type = LAST_BIND;
	res = READ_ONCE(inode->i_link);
	if (!res) {
		const char * (*get)(struct dentry *, struct inode *,
				    struct delayed_call *);
		get = inode->i_op->get_link;
		if (nd->flags & LOOKUP_RCU) {
			/* NULL dentry asks for the RCU-safe variant; -ECHILD
			 * means the fs wants us to retry in ref-walk. */
			res = get(NULL, inode, &last->done);
			if (res == ERR_PTR(-ECHILD)) {
				if (unlikely(unlazy_walk(nd)))
					return ERR_PTR(-ECHILD);
				res = get(dentry, inode, &last->done);
			}
		} else {
			res = get(dentry, inode, &last->done);
		}
		if (IS_ERR_OR_NULL(res))
			return res;
	}
	if (*res == '/') {
		/* Absolute target: restart from the walk's root. */
		if (!nd->root.mnt)
			set_root(nd);
		if (unlikely(nd_jump_root(nd)))
			return ERR_PTR(-ECHILD);
		while (unlikely(*++res == '/'))
			;
	}
	if (!*res)
		res = NULL;
	return res;
}

/*
 * follow_up - Find the mountpoint of path's vfsmount
 *
 * Given a path, find the mountpoint of its source file system.
 * Replace @path with the path of the mountpoint in the parent mount.
 * Up is towards /.
 *
 * Return 1 if we went up a level and 0 if we were already at the
 * root.
*/ int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; read_seqlock_excl(&mount_lock); parent = mnt->mnt_parent; if (parent == mnt) { read_sequnlock_excl(&mount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); read_sequnlock_excl(&mount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = &parent->mnt; return 1; } EXPORT_SYMBOL(follow_up); /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, struct nameidata *nd, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone's just doing a stat - * unless they're stat'ing a directory and appended a '/' to * the name. * * We do, however, want to mount if someone wants to open or * create a file of any type under the mountpoint, wants to * traverse through the mountpoint or wants to open the * mounted directory. Also, autofs may mark negative dentries * as being automount points. These will need the attentions * of the daemon to instantiate them before they can be used. */ if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && path->dentry->d_inode) return -EISDIR; nd->total_link_count++; if (nd->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. 
*/ if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT)) return -EREMOTE; return PTR_ERR(mnt); } if (!mnt) /* mount collision */ return 0; if (!*need_mntput) { /* lock_mount() may release path->mnt on error */ mntget(path->mnt); *need_mntput = true; } err = finish_automount(mnt, path); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ return 0; case 0: path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); return 0; default: return err; } } /* * Handle a dentry that is managed in some way. * - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * * This may only be called in refwalk mode. * On success path->dentry is known positive. * * Serialization is taken care of in namespace.c */ static int follow_managed(struct path *path, struct nameidata *nd) { struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned flags; bool need_mntput = false; int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (flags = smp_load_acquire(&path->dentry->d_flags), unlikely(flags & DCACHE_MANAGED_DENTRY)) { /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (flags & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path, false); flags = smp_load_acquire(&path->dentry->d_flags); if (ret < 0) break; } /* Transit to a mounted filesystem. 
*/ if (flags & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before lookup_mnt() could * get it */ } /* Handle an automount point */ if (flags & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, nd, &need_mntput); if (ret < 0) break; continue; } /* We didn't change the current path point */ break; } if (need_mntput && path->mnt == mnt) mntput(path->mnt); if (need_mntput) nd->flags |= LOOKUP_JUMPED; if (ret == -EISDIR || !ret) ret = 1; if (ret > 0 && unlikely(d_flags_negative(flags))) ret = -ENOENT; if (unlikely(ret < 0)) path_put_conditional(path, nd); return ret; } int follow_down_one(struct path *path) { struct vfsmount *mounted; mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); return 1; } return 0; } EXPORT_SYMBOL(follow_down_one); static inline int managed_dentry_rcu(const struct path *path) { return (path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) ? path->dentry->d_op->d_manage(path, true) : 0; } /* * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if * we meet a managed dentry that would need blocking. */ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { for (;;) { struct mount *mounted; /* * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. 
*/ switch (managed_dentry_rcu(path)) { case -ECHILD: default: return false; case -EISDIR: return true; case 0: break; } if (!d_mountpoint(path->dentry)) return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); mounted = __lookup_mnt(path->mnt, path->dentry); if (!mounted) break; path->mnt = &mounted->mnt; path->dentry = mounted->mnt.mnt_root; nd->flags |= LOOKUP_JUMPED; *seqp = read_seqcount_begin(&path->dentry->d_seq); /* * Update the inode too. We don't need to re-check the * dentry sequence number here after this d_inode read, * because a mount-point is always pinned. */ *inode = path->dentry->d_inode; } return !read_seqretry(&mount_lock, nd->m_seq) && !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); } static int follow_dotdot_rcu(struct nameidata *nd) { struct inode *inode = nd->inode; while (1) { if (path_equal(&nd->path, &nd->root)) break; if (nd->path.dentry != nd->path.mnt->mnt_root) { struct dentry *old = nd->path.dentry; struct dentry *parent = old->d_parent; unsigned seq; inode = parent->d_inode; seq = read_seqcount_begin(&parent->d_seq); if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq))) return -ECHILD; nd->path.dentry = parent; nd->seq = seq; if (unlikely(!path_connected(&nd->path))) return -ENOENT; break; } else { struct mount *mnt = real_mount(nd->path.mnt); struct mount *mparent = mnt->mnt_parent; struct dentry *mountpoint = mnt->mnt_mountpoint; struct inode *inode2 = mountpoint->d_inode; unsigned seq = read_seqcount_begin(&mountpoint->d_seq); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (&mparent->mnt == nd->path.mnt) break; /* we know that mountpoint was pinned */ nd->path.dentry = mountpoint; nd->path.mnt = &mparent->mnt; inode = inode2; nd->seq = seq; } } while (unlikely(d_mountpoint(nd->path.dentry))) { struct mount *mounted; mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (!mounted) break; nd->path.mnt = &mounted->mnt; 
nd->path.dentry = mounted->mnt.mnt_root; inode = nd->path.dentry->d_inode; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } nd->inode = inode; return 0; } /* * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. */ int follow_down(struct path *path) { unsigned managed; int ret; while (managed = READ_ONCE(path->dentry->d_flags), unlikely(managed & DCACHE_MANAGED_DENTRY)) { /* Allow the filesystem to manage the transit without i_mutex * being held. * * We indicate to the filesystem if someone is trying to mount * something here. This gives autofs the chance to deny anyone * other than its daemon the right to mount on its * superstructure. * * The filesystem may sleep at this point. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); continue; } /* Don't handle automount points here */ break; } return 0; } EXPORT_SYMBOL(follow_down); /* * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() */ static void follow_mount(struct path *path) { while (d_mountpoint(path->dentry)) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); } } static int path_parent_directory(struct path *path) { struct dentry *old = path->dentry; /* rare case of legitimate dget_parent()... 
 */
	path->dentry = dget_parent(path->dentry);
	dput(old);
	if (unlikely(!path_connected(path)))
		return -ENOENT;
	return 0;
}

/*
 * Handle ".." in ref-walk mode: climb to the parent directory, going up
 * through mount points (follow_up) when sitting on a mount root, stop at
 * the walk's root, then descend through any mounts stacked on the result
 * and refresh nd->inode.
 */
static int follow_dotdot(struct nameidata *nd)
{
	while (1) {
		if (path_equal(&nd->path, &nd->root))
			break;
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			int ret = path_parent_directory(&nd->path);
			if (ret)
				return ret;
			break;
		}
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
	return 0;
}

/*
 * This looks up the name in dcache and possibly revalidates the found dentry.
 * NULL is returned if the dentry does not exist in the cache.
 */
static struct dentry *lookup_dcache(const struct qstr *name,
				    struct dentry *dir,
				    unsigned int flags)
{
	struct dentry *dentry = d_lookup(dir, name);
	if (dentry) {
		int error = d_revalidate(dentry, flags);
		if (unlikely(error <= 0)) {
			/* 0 == stale: drop it from the cache; <0: real error. */
			if (!error)
				d_invalidate(dentry);
			dput(dentry);
			return ERR_PTR(error);
		}
	}
	return dentry;
}

/*
 * Parent directory has inode locked exclusive.  This is one
 * and only case when ->lookup() gets called on non in-lookup
 * dentries - as the matter of fact, this only gets called
 * when directory is guaranteed to have no in-lookup children
 * at all.
 */
static struct dentry *__lookup_hash(const struct qstr *name,
		struct dentry *base, unsigned int flags)
{
	struct dentry *dentry = lookup_dcache(name, base, flags);
	struct dentry *old;
	struct inode *dir = base->d_inode;

	if (dentry)
		return dentry;

	/* Don't create child dentry for a dead directory.
*/ if (unlikely(IS_DEADDIR(dir))) return ERR_PTR(-ENOENT); dentry = d_alloc(base, name); if (unlikely(!dentry)) return ERR_PTR(-ENOMEM); old = dir->i_op->lookup(dir, dentry, flags); if (unlikely(old)) { dput(dentry); dentry = old; } return dentry; } static int lookup_fast(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; int status = 1; int err; /* * Rename seqlock is not required here because in the off chance * of a false negative due to a concurrent rename, the caller is * going to fall back to non-racy lookup. */ if (nd->flags & LOOKUP_RCU) { unsigned seq; bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { if (unlazy_walk(nd)) return -ECHILD; return 0; } /* * This sequence count validates that the inode matches * the dentry name information from lookup. */ *inode = d_backing_inode(dentry); negative = d_is_negative(dentry); if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) return -ECHILD; /* * This sequence count validates that the parent had no * changes while we did the lookup of the dentry above. * * The memory barrier in read_seqcount_begin of child is * enough, we can use __read_seqcount_retry here. */ if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq))) return -ECHILD; *seqp = seq; status = d_revalidate(dentry, nd->flags); if (likely(status > 0)) { /* * Note: do negative dentry check after revalidation in * case that drops it. 
*/ if (unlikely(negative)) return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; } if (unlazy_child(nd, dentry, seq)) return -ECHILD; if (unlikely(status == -ECHILD)) /* we'd been told to redo it in non-rcu mode */ status = d_revalidate(dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) d_invalidate(dentry); dput(dentry); return status; } path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd); if (likely(err > 0)) *inode = d_backing_inode(path->dentry); return err; } /* Fast lookup failed, do it the slow way */ static struct dentry *__lookup_slow(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) return ERR_PTR(-ENOENT); again: dentry = d_alloc_parallel(dir, name, &wq); if (IS_ERR(dentry)) return dentry; if (unlikely(!d_in_lookup(dentry))) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) { d_invalidate(dentry); dput(dentry); goto again; } dput(dentry); dentry = ERR_PTR(error); } } else { old = inode->i_op->lookup(inode, dentry, flags); d_lookup_done(dentry); if (unlikely(old)) { dput(dentry); dentry = old; } } return dentry; } static struct dentry *lookup_slow(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct inode *inode = dir->d_inode; struct dentry *res; inode_lock_shared(inode); res = __lookup_slow(name, dir, flags); inode_unlock_shared(inode); return res; } static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; if (unlazy_walk(nd)) return -ECHILD; } return 
inode_permission(nd->inode, MAY_EXEC); } static inline int handle_dots(struct nameidata *nd, int type) { if (type == LAST_DOTDOT) { if (!nd->root.mnt) set_root(nd); if (nd->flags & LOOKUP_RCU) { return follow_dotdot_rcu(nd); } else return follow_dotdot(nd); } return 0; } static int pick_link(struct nameidata *nd, struct path *link, struct inode *inode, unsigned seq) { int error; struct saved *last; if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) { path_to_nameidata(link, nd); return -ELOOP; } if (!(nd->flags & LOOKUP_RCU)) { if (link->mnt == nd->path.mnt) mntget(link->mnt); } error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { if (unlikely(!legitimize_path(nd, link, seq))) { drop_links(nd); nd->depth = 0; nd->flags &= ~LOOKUP_RCU; nd->path.mnt = NULL; nd->path.dentry = NULL; rcu_read_unlock(); } else if (likely(unlazy_walk(nd)) == 0) error = nd_alloc_stack(nd); } if (error) { path_put(link); return error; } } last = nd->stack + nd->depth++; last->link = *link; clear_delayed_call(&last->done); nd->link_inode = inode; last->seq = seq; return 1; } enum {WALK_FOLLOW = 1, WALK_MORE = 2}; /* * Do we need to follow links? We _really_ want to be able * to do this check without having to look at inode->i_op, * so we keep a cache of "no, this doesn't need follow_link" * for the common case. 
*/ static inline int step_into(struct nameidata *nd, struct path *path, int flags, struct inode *inode, unsigned seq) { if (!(flags & WALK_MORE) && nd->depth) put_link(nd); if (likely(!d_is_symlink(path->dentry)) || !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) { /* not a symlink or should not follow */ path_to_nameidata(path, nd); nd->inode = inode; nd->seq = seq; return 0; } /* make sure that d_is_symlink above matches inode */ if (nd->flags & LOOKUP_RCU) { if (read_seqcount_retry(&path->dentry->d_seq, seq)) return -ECHILD; } return pick_link(nd, path, inode, seq); } static int walk_component(struct nameidata *nd, int flags) { struct path path; struct inode *inode; unsigned seq; int err; /* * "." and ".." are special - ".." especially so because it has * to be able to know about the current root directory and * parent relationships. */ if (unlikely(nd->last_type != LAST_NORM)) { err = handle_dots(nd, nd->last_type); if (!(flags & WALK_MORE) && nd->depth) put_link(nd); return err; } err = lookup_fast(nd, &path, &inode, &seq); if (unlikely(err <= 0)) { if (err < 0) return err; path.dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags); if (IS_ERR(path.dentry)) return PTR_ERR(path.dentry); path.mnt = nd->path.mnt; err = follow_managed(&path, nd); if (unlikely(err < 0)) return err; seq = 0; /* we are already out of RCU mode */ inode = d_backing_inode(path.dentry); } return step_into(nd, &path, flags, inode, seq); } /* * We can do the critical dentry name comparison and hashing * operations one word at a time, but we are limited to: * * - Architectures with fast unaligned word accesses. We could * do a "get_unaligned()" if this helps and is sufficiently * fast. * * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we * do not trap on the (extremely unlikely) case of a page * crossing operation. * * - Furthermore, we need an efficient 64-bit compile for the * 64-bit case in order to generate the "number of bytes in * the final mask". 
Again, that could be replaced with a * efficient population count instruction or similar. */ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> #ifdef HASH_MIX /* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */ #elif defined(CONFIG_64BIT) /* * Register pressure in the mixing function is an issue, particularly * on 32-bit x86, but almost any function requires one state value and * one temporary. Instead, use a function designed for two state values * and no temporaries. * * This function cannot create a collision in only two iterations, so * we have two iterations to achieve avalanche. In those two iterations, * we have six layers of mixing, which is enough to spread one bit's * influence out to 2^6 = 64 state bits. * * Rotate constants are scored by considering either 64 one-bit input * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the * probability of that delta causing a change to each of the 128 output * bits, using a sample of random initial states. * * The Shannon entropy of the computed probabilities is then summed * to produce a score. Ideally, any input change has a 50% chance of * toggling any given output bit. * * Mixing scores (in bits) for (12,45): * Input delta: 1-bit 2-bit * 1 round: 713.3 42542.6 * 2 rounds: 2753.7 140389.8 * 3 rounds: 5954.1 233458.2 * 4 rounds: 7862.6 256672.2 * Perfect: 8192 258048 * (64*128) (64*63/2 * 128) */ #define HASH_MIX(x, y, a) \ ( x ^= (a), \ y ^= x, x = rol64(x,12),\ x += y, y = rol64(y,45),\ y *= 9 ) /* * Fold two longs into one 32-bit hash value. This must be fast, but * latency isn't quite as critical, as there is a fair bit of additional * work done before the hash value is used. 
*/ static inline unsigned int fold_hash(unsigned long x, unsigned long y) { y ^= x * GOLDEN_RATIO_64; y *= GOLDEN_RATIO_64; return y >> 32; } #else /* 32-bit case */ /* * Mixing scores (in bits) for (7,20): * Input delta: 1-bit 2-bit * 1 round: 330.3 9201.6 * 2 rounds: 1246.4 25475.4 * 3 rounds: 1907.1 31295.1 * 4 rounds: 2042.3 31718.6 * Perfect: 2048 31744 * (32*64) (32*31/2 * 64) */ #define HASH_MIX(x, y, a) \ ( x ^= (a), \ y ^= x, x = rol32(x, 7),\ x += y, y = rol32(y,20),\ y *= 9 ) static inline unsigned int fold_hash(unsigned long x, unsigned long y) { /* Use arch-optimized multiply if one exists */ return __hash_32(y ^ __hash_32(x)); } #endif /* * Return the hash of a string of known length. This is carfully * designed to match hash_name(), which is the more critical function. * In particular, we must end by hashing a final word containing 0..7 * payload bytes, to match the way that hash_name() iterates until it * finds the delimiter after the name. */ unsigned int full_name_hash(const void *salt, const char *name, unsigned int len) { unsigned long a, x = 0, y = (unsigned long)salt; for (;;) { if (!len) goto done; a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; HASH_MIX(x, y, a); name += sizeof(unsigned long); len -= sizeof(unsigned long); } x ^= a & bytemask_from_count(len); done: return fold_hash(x, y); } EXPORT_SYMBOL(full_name_hash); /* Return the "hash_len" (hash and length) of a null-terminated string */ u64 hashlen_string(const void *salt, const char *name) { unsigned long a = 0, x = 0, y = (unsigned long)salt; unsigned long adata, mask, len; const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; len = 0; goto inside; do { HASH_MIX(x, y, a); len += sizeof(unsigned long); inside: a = load_unaligned_zeropad(name+len); } while (!has_zero(a, &adata, &constants)); adata = prep_zero_mask(a, adata, &constants); mask = create_zero_mask(adata); x ^= a & zero_bytemask(mask); return hashlen_create(fold_hash(x, y), len + 
find_zero(mask)); } EXPORT_SYMBOL(hashlen_string); /* * Calculate the length and hash of the path component, and * return the "hash_len" as the result. */ static inline u64 hash_name(const void *salt, const char *name) { unsigned long a = 0, b, x = 0, y = (unsigned long)salt; unsigned long adata, bdata, mask, len; const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; len = 0; goto inside; do { HASH_MIX(x, y, a); len += sizeof(unsigned long); inside: a = load_unaligned_zeropad(name+len); b = a ^ REPEAT_BYTE('/'); } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants))); adata = prep_zero_mask(a, adata, &constants); bdata = prep_zero_mask(b, bdata, &constants); mask = create_zero_mask(adata | bdata); x ^= a & zero_bytemask(mask); return hashlen_create(fold_hash(x, y), len + find_zero(mask)); } #else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */ /* Return the hash of a string of known length */ unsigned int full_name_hash(const void *salt, const char *name, unsigned int len) { unsigned long hash = init_name_hash(salt); while (len--) hash = partial_name_hash((unsigned char)*name++, hash); return end_name_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* Return the "hash_len" (hash and length) of a null-terminated string */ u64 hashlen_string(const void *salt, const char *name) { unsigned long hash = init_name_hash(salt); unsigned long len = 0, c; c = (unsigned char)*name; while (c) { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } return hashlen_create(end_name_hash(hash), len); } EXPORT_SYMBOL(hashlen_string); /* * We know there's a real path component here of at least * one character. 
*/ static inline u64 hash_name(const void *salt, const char *name) { unsigned long hash = init_name_hash(salt); unsigned long len = 0, c; c = (unsigned char)*name; do { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } while (c && c != '/'); return hashlen_create(end_name_hash(hash), len); } #endif /* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. * * Returns 0 and nd will have valid dentry and mnt on success. * Returns error and drops reference to input namei data on failure. */ static int link_path_walk(const char *name, struct nameidata *nd) { int err; if (IS_ERR(name)) return PTR_ERR(name); while (*name=='/') name++; if (!*name) return 0; /* At this point we know we have a real path component. */ for(;;) { u64 hash_len; int type; err = may_lookup(nd); if (err) return err; hash_len = hash_name(nd->path.dentry, name); type = LAST_NORM; if (name[0] == '.') switch (hashlen_len(hash_len)) { case 2: if (name[1] == '.') { type = LAST_DOTDOT; nd->flags |= LOOKUP_JUMPED; } break; case 1: type = LAST_DOT; } if (likely(type == LAST_NORM)) { struct dentry *parent = nd->path.dentry; nd->flags &= ~LOOKUP_JUMPED; if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { struct qstr this = { { .hash_len = hash_len }, .name = name }; err = parent->d_op->d_hash(parent, &this); if (err < 0) return err; hash_len = this.hash_len; name = this.name; } } nd->last.hash_len = hash_len; nd->last.name = name; nd->last_type = type; name += hashlen_len(hash_len); if (!*name) goto OK; /* * If it wasn't NUL, we know it was '/'. Skip that * slash, and continue until no more slashes. 
*/ do { name++; } while (unlikely(*name == '/')); if (unlikely(!*name)) { OK: /* pathname body, done */ if (!nd->depth) return 0; name = nd->stack[nd->depth - 1].name; /* trailing symlink, done */ if (!name) return 0; /* last component of nested symlink */ err = walk_component(nd, WALK_FOLLOW); } else { /* not the last component */ err = walk_component(nd, WALK_FOLLOW | WALK_MORE); } if (err < 0) return err; if (err) { const char *s = get_link(nd); if (IS_ERR(s)) return PTR_ERR(s); err = 0; if (unlikely(!s)) { /* jumped */ put_link(nd); } else { nd->stack[nd->depth - 1].name = name; name = s; continue; } } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd)) return -ECHILD; } return -ENOTDIR; } } } /* must be paired with terminate_walk() */ static const char *path_init(struct nameidata *nd, unsigned flags) { const char *s = nd->name->name; if (!*s) flags &= ~LOOKUP_RCU; if (flags & LOOKUP_RCU) rcu_read_lock(); nd->last_type = LAST_ROOT; /* if there are only slashes... 
*/
	/* (tail of path_init(), which begins before this chunk) */
	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		/* Caller supplied an explicit root; start the walk there. */
		struct dentry *root = nd->root.dentry;
		struct inode *inode = root->d_inode;
		if (*s && unlikely(!d_can_lookup(root)))
			return ERR_PTR(-ENOTDIR);
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			nd->root_seq = nd->seq;
			nd->m_seq = read_seqbegin(&mount_lock);
		} else {
			path_get(&nd->path);
		}
		return s;
	}

	nd->root.mnt = NULL;
	nd->path.mnt = NULL;
	nd->path.dentry = NULL;

	nd->m_seq = read_seqbegin(&mount_lock);
	if (*s == '/') {
		/* Absolute path: start from the process (or chroot) root. */
		set_root(nd);
		if (likely(!nd_jump_root(nd)))
			return s;
		return ERR_PTR(-ECHILD);
	} else if (nd->dfd == AT_FDCWD) {
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			/* Sample cwd consistently against concurrent chdir. */
			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->inode = nd->path.dentry->d_inode;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		return s;
	} else {
		/* Caller must check execute permissions on the starting path component */
		struct fd f = fdget_raw(nd->dfd);
		struct dentry *dentry;

		if (!f.file)
			return ERR_PTR(-EBADF);

		dentry = f.file->f_path.dentry;

		if (*s && unlikely(!d_can_lookup(dentry))) {
			fdput(f);
			return ERR_PTR(-ENOTDIR);
		}

		nd->path = f.file->f_path;
		if (flags & LOOKUP_RCU) {
			nd->inode = nd->path.dentry->d_inode;
			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		fdput(f);
		return s;
	}
}

/*
 * Handle a trailing symlink once permission to follow it has been
 * verified; returns the link body (or "" for a jump-only link such as
 * a procfs-style jump), or an ERR_PTR().
 */
static const char *trailing_symlink(struct nameidata *nd)
{
	const char *s;
	int error = may_follow_link(nd);
	if (unlikely(error))
		return ERR_PTR(error);
	nd->flags |= LOOKUP_PARENT;
	nd->stack[0].name = NULL;
	s = get_link(nd);
	return s ? s : "";
}

/* Walk into the final component; a trailing '/' forces following/dir. */
static inline int lookup_last(struct nameidata *nd)
{
	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

	nd->flags &= ~LOOKUP_PARENT;
	return walk_component(nd, 0);
}

/* Descend through whatever is mounted on the starting point (LOOKUP_DOWN). */
static int handle_lookup_down(struct nameidata *nd)
{
	struct path path = nd->path;
	struct inode *inode = nd->inode;
	unsigned seq = nd->seq;
	int err;

	if (nd->flags & LOOKUP_RCU) {
		/*
		 * don't bother with unlazy_walk on failure - we are
		 * at the very beginning of walk, so we lose nothing
		 * if we simply redo everything in non-RCU mode
		 */
		if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq)))
			return -ECHILD;
	} else {
		dget(path.dentry);
		err = follow_managed(&path, nd);
		if (unlikely(err < 0))
			return err;
		inode = d_backing_inode(path.dentry);
		seq = 0;
	}
	path_to_nameidata(&path, nd);
	nd->inode = inode;
	nd->seq = seq;
	return 0;
}

/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;

	if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
		err = handle_lookup_down(nd);
		if (unlikely(err < 0))
			s = ERR_PTR(err);
	}

	/* lookup_last() > 0 means "trailing symlink to follow". */
	while (!(err = link_path_walk(s, nd))
		&& ((err = lookup_last(nd)) > 0)) {
		s = trailing_symlink(nd);
	}
	if (!err)
		err = complete_walk(nd);

	if (!err && nd->flags & LOOKUP_DIRECTORY)
		if (!d_can_lookup(nd->path.dentry))
			err = -ENOTDIR;
	if (!err) {
		/* Transfer the reference to *path. */
		*path = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}

/*
 * Full lookup driver: RCU-walk first, then ref-walk on -ECHILD, then a
 * forced-revalidation pass on -ESTALE.  Consumes @name.
 */
int filename_lookup(int dfd, struct filename *name, unsigned flags,
		    struct path *path, struct path *root)
{
	int retval;
	struct nameidata nd;
	if (IS_ERR(name))
		return PTR_ERR(name);
	if (unlikely(root)) {
		nd.root = *root;
		flags |= LOOKUP_ROOT;
	}
	set_nameidata(&nd, dfd, name);
	retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(&nd, flags, path);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);

	if (likely(!retval))
		audit_inode(name, path->dentry, 0);
	restore_nameidata();
	putname(name);
	return retval;
}

/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
				struct path *parent)
{
	const char *s = path_init(nd, flags);
	int err = link_path_walk(s, nd);
	if (!err)
		err = complete_walk(nd);
	if (!err) {
		*parent = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}

/*
 * Resolve everything but the last component; on success hands back the
 * parent in *parent and the final component in *last/*type.  Consumes
 * @name on failure, returns it (for later putname) on success.
 */
static struct filename *filename_parentat(int dfd, struct filename *name,
				unsigned int flags, struct path *parent,
				struct qstr *last, int *type)
{
	int retval;
	struct nameidata nd;

	if (IS_ERR(name))
		return name;
	set_nameidata(&nd, dfd, name);
	retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
	if (unlikely(retval == -ECHILD))
		retval = path_parentat(&nd, flags, parent);
	if (unlikely(retval == -ESTALE))
		retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
	if (likely(!retval)) {
		*last = nd.last;
		*type = nd.last_type;
		audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
	} else {
		putname(name);
		name = ERR_PTR(retval);
	}
	restore_nameidata();
	return name;
}

/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
	struct filename *filename;
	struct dentry *d;
	struct qstr last;
	int type;

	filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
				     &last, &type);
	if (IS_ERR(filename))
		return ERR_CAST(filename);
	if (unlikely(type != LAST_NORM)) {
		/* Last component was ".", ".." or empty - can't create here. */
		path_put(path);
		putname(filename);
		return ERR_PTR(-EINVAL);
	}
	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
	d = __lookup_hash(&last, path->dentry, 0);
	if (IS_ERR(d)) {
		/* Drop the lock and the parent reference on failure. */
		inode_unlock(path->dentry->d_inode);
		path_put(path);
	}
	putname(filename);
	return d;
}

/* Kernel-internal pathname lookup, relative to cwd. */
int kern_path(const char *name, unsigned int flags, struct path *path)
{
	return filename_lookup(AT_FDCWD, getname_kernel(name),
			       flags, path, NULL);
}
EXPORT_SYMBOL(kern_path);

/**
 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
 * @dentry:  pointer to dentry of the base directory
 * @mnt: pointer to vfs mount of the base directory
 * @name: pointer to file name
 * @flags: lookup flags
 * @path: pointer to struct path to fill
 */
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
		    const char *name, unsigned int flags,
		    struct path *path)
{
	struct path root = {.mnt = mnt, .dentry = dentry};
	/* the first argument of filename_lookup() is ignored with root */
	return filename_lookup(AT_FDCWD, getname_kernel(name),
			       flags , path, &root);
}
EXPORT_SYMBOL(vfs_path_lookup);

/*
 * Validate and hash a single component for the lookup_one_len*() family:
 * rejects empty names, "." and "..", embedded '/' or NUL, lets the
 * filesystem re-hash via ->d_hash, then checks exec permission on @base.
 */
static int lookup_one_len_common(const char *name, struct dentry *base,
				 int len, struct qstr *this)
{
	this->name = name;
	this->len = len;
	this->hash = full_name_hash(base, name, len);
	if (!len)
		return -EACCES;

	if (unlikely(name[0] == '.')) {
		/* Rejects both "." (len == 1 here) and "..". */
		if (len < 2 || (len == 2 && name[1] == '.'))
			return -EACCES;
	}

	while (len--) {
		unsigned int c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return -EACCES;
	}
	/*
	 * See if the low-level filesystem might want
	 * to use its own hash..
	 */
	if (base->d_flags & DCACHE_OP_HASH) {
		int err = base->d_op->d_hash(base, this);
		if (err < 0)
			return err;
	}

	return inode_permission(base->d_inode, MAY_EXEC);
}

/**
 * try_lookup_one_len - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Look up a dentry by name in the dcache, returning NULL if it does not
 * currently exist.  The function does not try to create a dentry.
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.
 *
 * The caller must hold base->i_mutex.
 */
struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct qstr this;
	int err;

	WARN_ON_ONCE(!inode_is_locked(base->d_inode));

	err = lookup_one_len_common(name, base, len, &this);
	if (err)
		return ERR_PTR(err);

	return lookup_dcache(&this, base, 0);
}
EXPORT_SYMBOL(try_lookup_one_len);

/**
 * lookup_one_len - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.
 *
 * The caller must hold base->i_mutex.
 */
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct dentry *dentry;
	struct qstr this;
	int err;

	WARN_ON_ONCE(!inode_is_locked(base->d_inode));

	err = lookup_one_len_common(name, base, len, &this);
	if (err)
		return ERR_PTR(err);

	/* dcache miss falls through to the slow (filesystem) lookup. */
	dentry = lookup_dcache(&this, base, 0);
	return dentry ? dentry : __lookup_slow(&this, base, 0);
}
EXPORT_SYMBOL(lookup_one_len);

/**
 * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.
 *
 * Unlike lookup_one_len, it should be called without the parent
 * i_mutex held, and will take the i_mutex itself if necessary.
 */
struct dentry *lookup_one_len_unlocked(const char *name,
				       struct dentry *base, int len)
{
	struct qstr this;
	int err;
	struct dentry *ret;

	err = lookup_one_len_common(name, base, len, &this);
	if (err)
		return ERR_PTR(err);

	ret = lookup_dcache(&this, base, 0);
	if (!ret)
		ret = lookup_slow(&this, base, 0);
	return ret;
}
EXPORT_SYMBOL(lookup_one_len_unlocked);

/*
 * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
 * on negatives.  Returns known positive or ERR_PTR(); that's what
 * most of the users want.  Note that pinned negative with unlocked parent
 * _can_ become positive at any time, so callers of lookup_one_len_unlocked()
 * need to be very careful; pinned positives have ->d_inode stable, so
 * this one avoids such problems.
 */
struct dentry *lookup_positive_unlocked(const char *name,
				       struct dentry *base, int len)
{
	struct dentry *ret = lookup_one_len_unlocked(name, base, len);
	if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
		dput(ret);
		ret = ERR_PTR(-ENOENT);
	}
	return ret;
}
EXPORT_SYMBOL(lookup_positive_unlocked);

#ifdef CONFIG_UNIX98_PTYS
int path_pts(struct path *path)
{
	/* Find something mounted on "pts" in the same directory as
	 * the input path.
	 */
	struct dentry *child, *parent;
	struct qstr this;
	int ret;

	ret = path_parent_directory(path);
	if (ret)
		return ret;

	parent = path->dentry;
	this.name = "pts";
	this.len = 3;
	child = d_hash_and_lookup(parent, &this);
	if (!child)
		return -ENOENT;

	path->dentry = child;
	dput(parent);
	follow_mount(path);
	return 0;
}
#endif

/* Lookup of a userland path; @empty reports whether the name was "". */
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
		 struct path *path, int *empty)
{
	return filename_lookup(dfd, getname_flags(name, flags, empty),
			       flags, path, NULL);
}
EXPORT_SYMBOL(user_path_at_empty);

/**
 * path_mountpoint - look up a path to be umounted
 * @nd:		lookup context
 * @flags:	lookup flags
 * @path:	pointer to container for result
 *
 * Look up the given name, but don't attempt to revalidate the last component.
 * Returns 0 and "path" will be valid on success; Returns error otherwise.
 */
static int
path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;

	while (!(err = link_path_walk(s, nd)) &&
		(err = lookup_last(nd)) > 0) {
		s = trailing_symlink(nd);
	}
	if (!err && (nd->flags & LOOKUP_RCU))
		err = unlazy_walk(nd);
	if (!err)
		err = handle_lookup_down(nd);
	if (!err) {
		*path = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}

/* RCU/ref/REVAL retry driver for path_mountpoint(); consumes @name. */
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
			unsigned int flags)
{
	struct nameidata nd;
	int error;
	if (IS_ERR(name))
		return PTR_ERR(name);
	set_nameidata(&nd, dfd, name);
	error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(error == -ECHILD))
		error = path_mountpoint(&nd, flags, path);
	if (unlikely(error == -ESTALE))
		error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
	if (likely(!error))
		audit_inode(name, path->dentry, AUDIT_INODE_NOEVAL);
	restore_nameidata();
	putname(name);
	return error;
}

/**
 * user_path_mountpoint_at - lookup a path from userland in order to umount it
 * @dfd:	directory file descriptor
 * @name:	pathname from userland
 * @flags:	lookup flags
 * @path:	pointer to container to hold result
 *
 * A umount is a special case for path walking. We're not actually interested
 * in the inode in this situation, and ESTALE errors can be a problem.  We
 * simply want track down the dentry and vfsmount attached at the mountpoint
 * and avoid revalidating the last component.
 *
 * Returns 0 and populates "path" on success.
 */
int
user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
			struct path *path)
{
	return filename_mountpoint(dfd, getname(name), path, flags);
}

int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
			unsigned int flags)
{
	return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);

/*
 * Sticky-bit check: deletion is allowed when the caller owns the victim
 * or the directory, or has CAP_FOWNER relative to the inode's userns.
 */
int __check_sticky(struct inode *dir, struct inode *inode)
{
	kuid_t fsuid = current_fsuid();

	if (uid_eq(inode->i_uid, fsuid))
		return 0;
	if (uid_eq(dir->i_uid, fsuid))
		return 0;
	return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);

/*
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do antyhing with
 *     links pointing to it.
 *  7. If the victim has an unknown uid or gid we can't change the inode.
 *  8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 * 10. We can't remove a root or mountpoint.
 * 11. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
{
	struct inode *inode = d_backing_inode(victim);
	int error;

	if (d_is_negative(victim))
		return -ENOENT;
	BUG_ON(!inode);

	BUG_ON(victim->d_parent->d_inode != dir);

	/* Inode writeback is not safe when the uid or gid are invalid. */
	if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
		return -EOVERFLOW;

	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;

	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*	Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We can't do it if the fs can't represent the fsuid or fsgid.
 *  4. We should have write and exec permissions on dir
 *  5. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	struct user_namespace *s_user_ns;
	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	s_user_ns = dir->i_sb->s_user_ns;
	if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
	    !kgid_has_mapping(s_user_ns, current_fsgid()))
		return -EOVERFLOW;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_sb->s_vfs_rename_mutex);

	/* If one is an ancestor of the other, lock the ancestor first. */
	p = d_ancestor(p2, p1);
	if (p) {
		inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
		inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
		inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
		return p;
	}

	/* Unrelated directories: lock order is fixed by PARENT/PARENT2. */
	inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
	inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
	return NULL;
}
EXPORT_SYMBOL(lock_rename);

/* Undo lock_rename(): drop both inode locks and the rename mutex. */
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	inode_unlock(p1->d_inode);
	if (p1 != p2) {
		inode_unlock(p2->d_inode);
		mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
	}
}
EXPORT_SYMBOL(unlock_rename);

/*
 * Create a regular file in @dir; checks permissions and security hooks
 * before calling the filesystem's ->create().
 */
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool want_excl)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, want_excl);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_create);

/*
 * Like vfs_create() but the object is created by the caller-supplied
 * callback @f instead of ->create().
 */
int vfs_mkobj(struct dentry *dentry, umode_t mode,
		int (*f)(struct dentry *, umode_t, void *),
		void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	int error = may_create(dir, dentry);
	if (error)
		return error;

	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = f(dentry, mode, arg);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mkobj);

/* Device nodes may only be opened if neither mount nor sb forbid it. */
bool may_open_dev(const struct path *path)
{
	return !(path->mnt->mnt_flags & MNT_NODEV) &&
		!(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
}

/*
 * Final permission / sanity checks before a file is actually opened:
 * type-specific restrictions, inode permission, append-only and
 * O_NOATIME rules.
 */
static int may_open(const struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (!may_open_dev(path))
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		/* O_TRUNC is meaningless for devices, FIFOs and sockets. */
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, MAY_OPEN | acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if  ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	return 0;
}

/* Truncate an opened file to zero length (O_TRUNC handling). */
static int handle_truncate(struct file *filp)
{
	const struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(filp);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}

/* Map userspace open flags to the ->atomic_open() convention (3 -> 2). */
static inline int open_to_namei_flags(int flag)
{
	if ((flag & O_ACCMODE) == 3)
		flag--;
	return flag;
}

/* Permission checks for creating via ->atomic_open(). */
static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
{
	struct user_namespace *s_user_ns;
	int error = security_path_mknod(dir, dentry, mode, 0);
	if (error)
		return error;

	s_user_ns = dir->dentry->d_sb->s_user_ns;
	if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
	    !kgid_has_mapping(s_user_ns, current_fsgid()))
		return -EOVERFLOW;

	error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	return security_inode_create(dir->dentry->d_inode, dentry, mode);
}

/*
 * Attempt to atomically look up, create and open a file from a negative
 * dentry.
 *
 * Returns 0 if successful.  The file will have been created and attached to
 * @file by the filesystem calling finish_open().
 *
 * If the file was looked up only or didn't need creating, FMODE_OPENED won't
 * be set.  The caller will need to perform the open themselves.  @path will
 * have been updated to point to the new dentry.  This may be negative.
 *
 * Returns an error code otherwise.
 */
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
			struct path *path, struct file *file,
			const struct open_flags *op,
			int open_flag, umode_t mode)
{
	/* Sentinel to detect filesystems that forget to set f_path.dentry. */
	struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
	struct inode *dir =  nd->path.dentry->d_inode;
	int error;

	if (!(~open_flag & (O_EXCL | O_CREAT)))	/* both O_EXCL and O_CREAT */
		open_flag &= ~O_TRUNC;

	if (nd->flags & LOOKUP_DIRECTORY)
		open_flag |= O_DIRECTORY;

	file->f_path.dentry = DENTRY_NOT_SET;
	file->f_path.mnt = nd->path.mnt;
	error = dir->i_op->atomic_open(dir, dentry, file,
				       open_to_namei_flags(open_flag), mode);
	d_lookup_done(dentry);
	if (!error) {
		if (file->f_mode & FMODE_OPENED) {
			/*
			 * We didn't have the inode before the open, so check open
			 * permission here.
			 */
			int acc_mode = op->acc_mode;
			if (file->f_mode & FMODE_CREATED) {
				WARN_ON(!(open_flag & O_CREAT));
				fsnotify_create(dir, dentry);
				acc_mode = 0;
			}
			error = may_open(&file->f_path, acc_mode, open_flag);
			if (WARN_ON(error > 0))
				error = -EINVAL;
		} else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
			error = -EIO;
		} else {
			/* fs may have returned a different (looked-up) dentry */
			if (file->f_path.dentry) {
				dput(dentry);
				dentry = file->f_path.dentry;
			}
			if (file->f_mode & FMODE_CREATED)
				fsnotify_create(dir, dentry);
			if (unlikely(d_is_negative(dentry))) {
				error = -ENOENT;
			} else {
				path->dentry = dentry;
				path->mnt = nd->path.mnt;
				return 0;
			}
		}
	}
	dput(dentry);
	return error;
}

/*
 * Look up and maybe create and open the last component.
 *
 * Must be called with parent locked (exclusive in O_CREAT case).
 *
 * Returns 0 on success, that is, if
 *  the file was successfully atomically created (if necessary) and opened, or
 *  the file was not completely opened at this time, though lookups and
 *  creations were performed.
 * These case are distinguished by presence of FMODE_OPENED on file->f_mode.
 * In the latter case dentry returned in @path might be negative if O_CREAT
 * hadn't been specified.
 *
 * An error code is returned on failure.
 */
static int lookup_open(struct nameidata *nd, struct path *path,
			struct file *file,
			const struct open_flags *op,
			bool got_write)
{
	struct dentry *dir = nd->path.dentry;
	struct inode *dir_inode = dir->d_inode;
	int open_flag = op->open_flag;
	struct dentry *dentry;
	int error, create_error = 0;
	umode_t mode = op->mode;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

	if (unlikely(IS_DEADDIR(dir_inode)))
		return -ENOENT;

	file->f_mode &= ~FMODE_CREATED;
	dentry = d_lookup(dir, &nd->last);
	for (;;) {
		if (!dentry) {
			dentry = d_alloc_parallel(dir, &nd->last, &wq);
			if (IS_ERR(dentry))
				return PTR_ERR(dentry);
		}
		if (d_in_lookup(dentry))
			break;

		error = d_revalidate(dentry, nd->flags);
		if (likely(error > 0))
			break;
		if (error)
			goto out_dput;
		/* Stale dentry: invalidate and retry the lookup. */
		d_invalidate(dentry);
		dput(dentry);
		dentry = NULL;
	}
	if (dentry->d_inode) {
		/* Cached positive dentry: will open in f_op->open */
		goto out_no_open;
	}

	/*
	 * Checking write permission is tricky, because we don't know if we are
	 * going to actually need it: O_CREAT opens should work as long as the
	 * file exists.  But checking existence breaks atomicity.  The trick is
	 * to check access and if not granted clear O_CREAT from the flags.
	 *
	 * Another problem is returning the "right" error value (e.g. for an
	 * O_EXCL open we want to return EEXIST not EROFS).
	 */
	if (open_flag & O_CREAT) {
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		if (unlikely(!got_write)) {
			create_error = -EROFS;
			open_flag &= ~O_CREAT;
			if (open_flag & (O_EXCL | O_TRUNC))
				goto no_open;
			/* No side effects, safe to clear O_CREAT */
		} else {
			create_error = may_o_create(&nd->path, dentry, mode);
			if (create_error) {
				open_flag &= ~O_CREAT;
				if (open_flag & O_EXCL)
					goto no_open;
			}
		}
	} else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) &&
		   unlikely(!got_write)) {
		/*
		 * No O_CREATE -> atomicity not a requirement -> fall
		 * back to lookup + open
		 */
		goto no_open;
	}

	if (dir_inode->i_op->atomic_open) {
		error = atomic_open(nd, dentry, path, file, op, open_flag, mode);
		if (unlikely(error == -ENOENT) && create_error)
			error = create_error;
		return error;
	}

no_open:
	if (d_in_lookup(dentry)) {
		struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
							     nd->flags);
		d_lookup_done(dentry);
		if (unlikely(res)) {
			if (IS_ERR(res)) {
				error = PTR_ERR(res);
				goto out_dput;
			}
			dput(dentry);
			dentry = res;
		}
	}

	/* Negative dentry, just create the file */
	if (!dentry->d_inode && (open_flag & O_CREAT)) {
		file->f_mode |= FMODE_CREATED;
		audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
		if (!dir_inode->i_op->create) {
			error = -EACCES;
			goto out_dput;
		}
		error = dir_inode->i_op->create(dir_inode, dentry, mode,
						open_flag & O_EXCL);
		if (error)
			goto out_dput;
		fsnotify_create(dir_inode, dentry);
	}
	/* Deferred create_error (e.g. EROFS) wins over a plain negative. */
	if (unlikely(create_error) && !dentry->d_inode) {
		error = create_error;
		goto out_dput;
	}
out_no_open:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 0;

out_dput:
	dput(dentry);
	return error;
}

/*
 * Handle the last step of open()
 */
static int do_last(struct nameidata *nd,
		   struct file *file, const struct open_flags *op)
{
	struct dentry *dir = nd->path.dentry;
	/*
	 * Snapshot the parent's uid/mode now, while nd->path still pins it;
	 * used later by may_create_in_sticky() after the walk moved on.
	 */
	kuid_t dir_uid = dir->d_inode->i_uid;
	umode_t dir_mode = dir->d_inode->i_mode;
	int open_flag = op->open_flag;
	bool will_truncate = (open_flag & O_TRUNC) != 0;
	bool got_write = false;
	int acc_mode = op->acc_mode;
	unsigned seq;
	struct inode *inode;
	struct path path;
	int error;

	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;

	if (nd->last_type != LAST_NORM) {
		error = handle_dots(nd, nd->last_type);
		if (unlikely(error))
			return error;
		goto finish_open;
	}

	if (!(open_flag & O_CREAT)) {
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		/* we _can_ be in RCU mode here */
		error = lookup_fast(nd, &path, &inode, &seq);
		if (likely(error > 0))
			goto finish_lookup;

		if (error < 0)
			return error;

		BUG_ON(nd->inode != dir->d_inode);
		BUG_ON(nd->flags & LOOKUP_RCU);
	} else {
		/* create side of things */
		/*
		 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
		 * has been cleared when we got to the last component we are
		 * about to look up
		 */
		error = complete_walk(nd);
		if (error)
			return error;

		audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
		/* trailing slashes? */
		if (unlikely(nd->last.name[nd->last.len]))
			return -EISDIR;
	}

	if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
		error = mnt_want_write(nd->path.mnt);
		if (!error)
			got_write = true;
		/*
		 * do _not_ fail yet - we might not need that or fail with
		 * a different error; let lookup_open() decide; we'll be
		 * dropping this one anyway.
		 */
	}
	if (open_flag & O_CREAT)
		inode_lock(dir->d_inode);
	else
		inode_lock_shared(dir->d_inode);
	error = lookup_open(nd, &path, file, op, got_write);
	if (open_flag & O_CREAT)
		inode_unlock(dir->d_inode);
	else
		inode_unlock_shared(dir->d_inode);

	if (error)
		goto out;

	if (file->f_mode & FMODE_OPENED) {
		if ((file->f_mode & FMODE_CREATED) ||
		    !S_ISREG(file_inode(file)->i_mode))
			will_truncate = false;

		audit_inode(nd->name, file->f_path.dentry, 0);
		goto opened;
	}

	if (file->f_mode & FMODE_CREATED) {
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = false;
		acc_mode = 0;
		path_to_nameidata(&path, nd);
		goto finish_open_created;
	}

	/*
	 * If atomic_open() acquired write access it is dropped now due to
	 * possible mount and symlink following (this might be optimized away if
	 * necessary...)
	 */
	if (got_write) {
		mnt_drop_write(nd->path.mnt);
		got_write = false;
	}

	error = follow_managed(&path, nd);
	if (unlikely(error < 0))
		return error;

	/*
	 * create/update audit record if it already exists.
	 */
	audit_inode(nd->name, path.dentry, 0);

	if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
		path_to_nameidata(&path, nd);
		return -EEXIST;
	}

	seq = 0;	/* out of RCU mode, so the value doesn't matter */
	inode = d_backing_inode(path.dentry);
finish_lookup:
	error = step_into(nd, &path, 0, inode, seq);
	if (unlikely(error))
		return error;
finish_open:
	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
	error = complete_walk(nd);
	if (error)
		return error;
	audit_inode(nd->name, nd->path.dentry, 0);
	if (open_flag & O_CREAT) {
		error = -EISDIR;
		if (d_is_dir(nd->path.dentry))
			goto out;
		error = may_create_in_sticky(dir_mode, dir_uid,
					     d_backing_inode(nd->path.dentry));
		if (unlikely(error))
			goto out;
	}
	error = -ENOTDIR;
	if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
		goto out;
	if (!d_is_reg(nd->path.dentry))
		will_truncate = false;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto out;
		got_write = true;
	}
finish_open_created:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto out;
	BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
	error = vfs_open(&nd->path, file);
	if (error)
		goto out;
opened:
	error = ima_file_check(file, op->acc_mode);
	if (!error && will_truncate)
		error = handle_truncate(file);
out:
	if (unlikely(error > 0)) {
		WARN_ON(1);
		error = -EINVAL;
	}
	if (got_write)
		mnt_drop_write(nd->path.mnt);
	return error;
}

/*
 * Create an unnamed (O_TMPFILE-style) file in the directory @dentry.
 * Unless O_EXCL was given the inode is marked I_LINKABLE so it can be
 * linked into the namespace later.
 */
struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
{
	struct dentry *child = NULL;
	struct inode *dir = dentry->d_inode;
	struct inode *inode;
	int error;

	/* we want directory to be writable */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out_err;
	error = -EOPNOTSUPP;
	if (!dir->i_op->tmpfile)
		goto out_err;
	error = -ENOMEM;
	child = d_alloc(dentry, &slash_name);
	if (unlikely(!child))
		goto out_err;
	error = dir->i_op->tmpfile(dir, child, mode);
	if (error)
		goto out_err;
	error = -ENOENT;
	inode = child->d_inode;
	if (unlikely(!inode))
		goto out_err;
	if (!(open_flag & O_EXCL)) {
		spin_lock(&inode->i_lock);
		inode->i_state |= I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
	ima_post_create_tmpfile(inode);
	return child;

out_err:
	dput(child);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_tmpfile);

/* Open path for the __O_TMPFILE case: resolve the dir, create tmpfile. */
static int do_tmpfile(struct nameidata *nd, unsigned flags,
		const struct open_flags *op,
		struct file *file)
{
	struct dentry *child;
	struct path path;
	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
	if (unlikely(error))
		return error;
	error = mnt_want_write(path.mnt);
	if (unlikely(error))
		goto out;
	child = vfs_tmpfile(path.dentry, op->mode, op->open_flag);
	error = PTR_ERR(child);
	if (IS_ERR(child))
		goto out2;
	dput(path.dentry);
	path.dentry = child;
	audit_inode(nd->name, child, 0);
	/* Don't check for other permissions, the inode was just created */
	error = may_open(&path, 0, op->open_flag);
	if (error)
		goto out2;
	file->f_path.mnt = path.mnt;
	error = finish_open(file, child, NULL);
out2:
	mnt_drop_write(path.mnt);
out:
	path_put(&path);
	return error;
}

/* Open path for O_PATH: plain lookup, no permission-mode open. */
static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
{
	struct path path;
	int error = path_lookupat(nd, flags, &path);
	if (!error) {
		audit_inode(nd->name, path.dentry, 0);
		error = vfs_open(&path, file);
		path_put(&path);
	}
	return error;
}

/*
 * Core of open(): allocate the file, then dispatch to the tmpfile,
 * O_PATH or normal walk-and-do_last() paths.
 */
static struct file *path_openat(struct nameidata *nd,
			const struct open_flags *op, unsigned flags)
{
	struct file *file;
	int error;

	file = alloc_empty_file(op->open_flag, current_cred());
	if (IS_ERR(file))
		return file;

	if (unlikely(file->f_flags & __O_TMPFILE)) {
		error = do_tmpfile(nd, flags, op, file);
	} else if (unlikely(file->f_flags & O_PATH)) {
		error = do_o_path(nd, flags, file);
	} else {
		const char *s = path_init(nd, flags);
		while (!(error = link_path_walk(s, nd)) &&
			(error = do_last(nd, file, op)) > 0) {
			nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
			s = trailing_symlink(nd);
		}
		terminate_walk(nd);
	}
	if (likely(!error)) {
		if (likely(file->f_mode & FMODE_OPENED))
			return file;
		WARN_ON(1);
		error = -EINVAL;
	}
	fput(file);
	if (error == -EOPENSTALE) {
		/* Propagate as retryable -ECHILD in RCU mode, -ESTALE otherwise. */
		if (flags & LOOKUP_RCU)
			error = -ECHILD;
		else
			error = -ESTALE;
	}
	return ERR_PTR(error);
}

/* RCU/ref/REVAL retry driver for open(). */
struct file *do_filp_open(int dfd, struct filename *pathname,
		const struct open_flags *op)
{
	struct nameidata nd;
	int flags = op->lookup_flags;
	struct file *filp;

	set_nameidata(&nd, dfd, pathname);
	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(&nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	return filp;
}

/* Like do_filp_open() but with an explicit root; refuses symlink roots. */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op)
{
	struct nameidata nd;
	struct file *file;
	struct filename *filename;
	int flags = op->lookup_flags | LOOKUP_ROOT;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	filename = getname_kernel(name);
	if (IS_ERR(filename))
		return ERR_CAST(filename);

	set_nameidata(&nd, -1, filename);
	file = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(&nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	putname(filename);
	return file;
}

/*
 * Resolve the parent and return a locked, negative dentry for the last
 * component, ready for creation; also takes mnt_want_write().  Paired
 * with done_path_create().
 */
static struct dentry *filename_create(int dfd, struct filename *name,
				struct path *path, unsigned int lookup_flags)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct qstr last;
	int type;
	int err2;
	int error;
	bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);

	/*
	 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
	 * other flags passed in are ignored!
	 */
	lookup_flags &= LOOKUP_REVAL;

	name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
	if (IS_ERR(name))
		return ERR_CAST(name);

	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (unlikely(type != LAST_NORM))
		goto out;

	/* don't fail immediately if it's r/o, at least try to report other errors */
	err2 = mnt_want_write(path->mnt);
	/*
	 * Do the final lookup.
	 */
	lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path->dentry, lookup_flags);
	if (IS_ERR(dentry))
		goto unlock;

	error = -EEXIST;
	if (d_is_positive(dentry))
		goto fail;

	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && last.name[last.len])) {
		error = -ENOENT;
		goto fail;
	}
	if (unlikely(err2)) {
		error = err2;
		goto fail;
	}
	putname(name);
	return dentry;
fail:
	dput(dentry);
	dentry = ERR_PTR(error);
unlock:
	inode_unlock(path->dentry->d_inode);
	if (!err2)
		mnt_drop_write(path->mnt);
out:
	path_put(path);
	putname(name);
	return dentry;
}

struct dentry *kern_path_create(int dfd, const char *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname_kernel(pathname),
				path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);

/* Undo filename_create(): drop dentry, unlock parent, drop write+path. */
void done_path_create(struct path *path, struct dentry *dentry)
{
	dput(dentry);
	inode_unlock(path->dentry->d_inode);
	mnt_drop_write(path->mnt);
	path_put(path);
}
EXPORT_SYMBOL(done_path_create);

inline struct dentry *user_path_create(int dfd, const char __user *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);

/* Create a device node / FIFO / socket; checks caps and security hooks. */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mknod);

/* Which file types may be created via mknod(2)? */
static int may_mknod(umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

long do_mknodat(int dfd, const char __user *filename, umode_t mode,
		unsigned int dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = 0;

	error = may_mknod(mode);
	if (error)
		return error;
retry:
	dentry = user_path_create(dfd, filename, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode,dentry,mode,true);
			if (!error)
				ima_post_path_mknod(dentry);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out:
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		/* Stale handle: retry once with forced revalidation. */
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned int, dev)
{
	return do_mknodat(dfd, filename, mode, dev);
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
	return do_mknodat(AT_FDCWD, filename, mode, dev);
}

/* Create a directory; checks permissions, security hook and link limit.
 * (Function continues past the end of this chunk.) */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
return error; } EXPORT_SYMBOL(vfs_mkdir); long do_mkdirat(int dfd, const char __user *pathname, umode_t mode) { struct dentry *dentry; struct path path; int error; unsigned int lookup_flags = LOOKUP_DIRECTORY; retry: dentry = user_path_create(dfd, pathname, &path, lookup_flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); error = security_path_mkdir(&path, dentry, mode); if (!error) error = vfs_mkdir(path.dentry->d_inode, dentry, mode); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) { return do_mkdirat(dfd, pathname, mode); } SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode) { return do_mkdirat(AT_FDCWD, pathname, mode); } int vfs_rmdir(struct inode *dir, struct dentry *dentry) { int error = may_delete(dir, dentry, 1); if (error) return error; if (!dir->i_op->rmdir) return -EPERM; dget(dentry); inode_lock(dentry->d_inode); error = -EBUSY; if (is_local_mountpoint(dentry)) goto out; error = security_inode_rmdir(dir, dentry); if (error) goto out; error = dir->i_op->rmdir(dir, dentry); if (error) goto out; shrink_dcache_parent(dentry); dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); fsnotify_rmdir(dir, dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) d_delete(dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); long do_rmdir(int dfd, const char __user *pathname) { int error = 0; struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; unsigned int lookup_flags = 0; retry: name = filename_parentat(dfd, getname(pathname), lookup_flags, &path, &last, &type); if (IS_ERR(name)) return PTR_ERR(name); switch (type) { case LAST_DOTDOT: error = -ENOTEMPTY; goto exit1; case LAST_DOT: error = -EINVAL; goto exit1; case LAST_ROOT: error = -EBUSY; 
goto exit1;
	}
	/* We are about to modify the parent: get write access to the
	 * mount, then take the parent's i_mutex (I_MUTEX_PARENT class)
	 * and re-look up the last component under that lock. */
	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;
	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	/* Security hook first, then the actual VFS rmdir. */
	error = security_path_rmdir(&path, dentry);
	if (error)
		goto exit3;
	error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
	dput(dentry);
exit2:
	inode_unlock(path.dentry->d_inode);
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	/* On -ESTALE, redo the whole walk with forced revalidation. */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

/* rmdir(2): remove a directory, path relative to the cwd. */
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

/**
 * vfs_unlink - unlink a filesystem object
 * @dir: parent directory
 * @dentry: victim
 * @delegated_inode: returns victim inode, if the inode is delegated.
 *
 * The caller must hold dir->i_mutex.
 *
 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
 * return a reference to the inode in delegated_inode.  The caller
 * should then break the delegation on that inode and retry.  Because
 * breaking a delegation may take a long time, the caller should drop
 * dir->i_mutex before doing so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode.  This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported.
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
	/* Victim's inode; locked for the duration of the unlink. */
	struct inode *target = dentry->d_inode;
	int error = may_delete(dir, dentry, 0);
	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	inode_lock(target);
	if (is_local_mountpoint(dentry))
		/* Victim is a mountpoint in this namespace: refuse. */
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			/* NFS delegation must be broken before the fs op. */
			error = try_break_deleg(target, delegated_inode);
			if (error)
				goto out;
			error = dir->i_op->unlink(dir, dentry);
			if (!error) {
				dont_mount(dentry);
				detach_mounts(dentry);
				fsnotify_unlink(dir, dentry);
			}
		}
	}
out:
	inode_unlock(target);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(target);
		d_delete(dentry);
	}

	return error;
}
EXPORT_SYMBOL(vfs_unlink);

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
long do_unlinkat(int dfd, struct filename *name)
{
	int error;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	struct inode *inode = NULL;
	struct inode *delegated_inode = NULL;
	unsigned int lookup_flags = 0;
retry:
	/* Resolve everything but the last component. */
	name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = -EISDIR;
	if (type != LAST_NORM)
		goto exit1;

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;
retry_deleg:
	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before?
Because we want correct error value */ if (last.name[last.len]) goto slashes; inode = dentry->d_inode; if (d_is_negative(dentry)) goto slashes; ihold(inode); error = security_path_unlink(&path, dentry); if (error) goto exit2; error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode); exit2: dput(dentry); } inode_unlock(path.dentry->d_inode); if (inode) iput(inode); /* truncate the inode here */ inode = NULL; if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path.mnt); exit1: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; inode = NULL; goto retry; } putname(name); return error; slashes: if (d_is_negative(dentry)) error = -ENOENT; else if (d_is_dir(dentry)) error = -EISDIR; else error = -ENOTDIR; goto exit2; } SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) { if ((flag & ~AT_REMOVEDIR) != 0) return -EINVAL; if (flag & AT_REMOVEDIR) return do_rmdir(dfd, pathname); return do_unlinkat(dfd, getname(pathname)); } SYSCALL_DEFINE1(unlink, const char __user *, pathname) { return do_unlinkat(AT_FDCWD, getname(pathname)); } int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->symlink) return -EPERM; error = security_inode_symlink(dir, dentry, oldname); if (error) return error; error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_symlink); long do_symlinkat(const char __user *oldname, int newdfd, const char __user *newname) { int error; struct filename *from; struct dentry *dentry; struct path path; unsigned int lookup_flags = 0; from = getname(oldname); if (IS_ERR(from)) return PTR_ERR(from); retry: dentry = user_path_create(newdfd, newname, &path, lookup_flags); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_putname; error = 
security_path_symlink(&path, dentry, from->name); if (!error) error = vfs_symlink(path.dentry->d_inode, dentry, from->name); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out_putname: putname(from); return error; } SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname) { return do_symlinkat(oldname, newdfd, newname); } SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) { return do_symlinkat(oldname, AT_FDCWD, newname); } /** * vfs_link - create a new link * @old_dentry: object to be linked * @dir: new parent * @new_dentry: where to create the new link * @delegated_inode: returns inode needing a delegation break * * The caller must hold dir->i_mutex * * If vfs_link discovers a delegation on the to-be-linked file in need * of breaking, it will return -EWOULDBLOCK and return a reference to the * inode in delegated_inode. The caller should then break the delegation * and retry. Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; /* * Updating the link count will likely cause i_uid and i_gid to * be writen back improperly if their true value is unknown to * the vfs. 
*/ if (HAS_UNMAPPED_ID(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; inode_lock(inode); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } inode_unlock(inode); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } EXPORT_SYMBOL(vfs_link); /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. --ADM */ int do_linkat(int olddfd, const char __user *oldname, int newdfd, const char __user *newname, int flags) { struct dentry *new_dentry; struct path old_path, new_path; struct inode *delegated_inode = NULL; int how = 0; int error; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * handlink using the passed filedescriptor. 
*/ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; retry: error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; new_dentry = user_path_create(newdfd, newname, &new_path, (how & LOOKUP_REVAL)); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out; error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; error = may_linkat(&old_path); if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); out_dput: done_path_create(&new_path, new_dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) { path_put(&old_path); goto retry; } } if (retry_estale(error, how)) { path_put(&old_path); how |= LOOKUP_REVAL; goto retry; } out: path_put(&old_path); return error; } SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { return do_linkat(olddfd, oldname, newdfd, newname, flags); } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /** * vfs_rename - rename a filesystem object * @old_dir: parent of source * @old_dentry: source * @new_dir: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags * * The caller must hold multiple mutexes--see lock_rename()). * * If vfs_rename discovers a delegation in need of breaking at either * the source or destination, it will return -EWOULDBLOCK and return a * reference to the inode in delegated_inode. The caller should then * break the delegation and retry. Because breaking a delegation may * take a long time, the caller should drop all locks before doing * so. 
* * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. * * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * * a) we can get into loop creation. * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _four_ objects - parents and victim (if it exists), * and source (if it is not a directory). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. 
*/ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, struct inode **delegated_inode, unsigned int flags) { int error; bool is_dir = d_is_dir(old_dentry); struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; struct name_snapshot old_name; if (source == target) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!target) { error = may_create(new_dir, new_dentry); } else { new_is_dir = d_is_dir(new_dentry); if (!(flags & RENAME_EXCHANGE)) error = may_delete(new_dir, new_dentry, is_dir); else error = may_delete(new_dir, new_dentry, new_is_dir); } if (error) return error; if (!old_dir->i_op->rename) return -EPERM; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. */ if (new_dir != old_dir) { if (is_dir) { error = inode_permission(source, MAY_WRITE); if (error) return error; } if ((flags & RENAME_EXCHANGE) && new_is_dir) { error = inode_permission(target, MAY_WRITE); if (error) return error; } } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) return error; take_dentry_name_snapshot(&old_name, old_dentry); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); else if (target) inode_lock(target); error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; if (max_links && new_dir != old_dir) { error = -EMLINK; if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links) goto out; if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir && old_dir->i_nlink >= max_links) goto out; } if (!is_dir) { error = try_break_deleg(source, delegated_inode); if (error) goto out; } if (target && !new_is_dir) { error = try_break_deleg(target, delegated_inode); if (error) goto out; } error = 
old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) goto out; if (!(flags & RENAME_EXCHANGE) && target) { if (is_dir) { shrink_dcache_parent(new_dentry); target->i_flags |= S_DEAD; } dont_mount(new_dentry); detach_mounts(new_dentry); } if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) { if (!(flags & RENAME_EXCHANGE)) d_move(old_dentry, new_dentry); else d_exchange(old_dentry, new_dentry); } out: if (!is_dir || (flags & RENAME_EXCHANGE)) unlock_two_nondirectories(source, target); else if (target) inode_unlock(target); dput(new_dentry); if (!error) { fsnotify_move(old_dir, new_dir, &old_name.name, is_dir, !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, &old_dentry->d_name, new_is_dir, NULL, new_dentry); } } release_dentry_name_snapshot(&old_name); return error; } EXPORT_SYMBOL(vfs_rename); static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, const char __user *newname, unsigned int flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct path old_path, new_path; struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; struct filename *from; struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) return -EINVAL; if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) return -EPERM; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: from = filename_parentat(olddfd, getname(oldname), lookup_flags, &old_path, &old_last, &old_type); if (IS_ERR(from)) { error = PTR_ERR(from); goto exit; } to = filename_parentat(newdfd, getname(newname), lookup_flags, &new_path, &new_last, &new_type); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; } error = 
-EXDEV; if (old_path.mnt != new_path.mnt) goto exit2; error = -EBUSY; if (old_type != LAST_NORM) goto exit2; if (flags & RENAME_NOREPLACE) error = -EEXIST; if (new_type != LAST_NORM) goto exit2; error = mnt_want_write(old_path.mnt); if (error) goto exit2; retry_deleg: trap = lock_rename(new_path.dentry, old_path.dentry); old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (d_is_negative(old_dentry)) goto exit4; new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; error = -EEXIST; if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) goto exit5; if (flags & RENAME_EXCHANGE) { error = -ENOENT; if (d_is_negative(new_dentry)) goto exit5; if (!d_is_dir(new_dentry)) { error = -ENOTDIR; if (new_last.name[new_last.len]) goto exit5; } } /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!d_is_dir(old_dentry)) { error = -ENOTDIR; if (old_last.name[old_last.len]) goto exit5; if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len]) goto exit5; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit5; /* target should not be an ancestor of source */ if (!(flags & RENAME_EXCHANGE)) error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = security_path_rename(&old_path, old_dentry, &new_path, new_dentry, flags); if (error) goto exit5; error = vfs_rename(old_path.dentry->d_inode, old_dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode, flags); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_path.dentry, old_path.dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(old_path.mnt); exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); 
putname(to);
exit1:
	path_put(&old_path);
	putname(from);
	if (should_retry) {
		/* -ESTALE: redo the whole rename with revalidation forced. */
		should_retry = false;
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
exit:
	return error;
}

SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, unsigned int, flags)
{
	return do_renameat2(olddfd, oldname, newdfd, newname, flags);
}

/* renameat(2): renameat2() with no flags. */
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	return do_renameat2(olddfd, oldname, newdfd, newname, 0);
}

/* rename(2): both paths relative to the cwd, no flags. */
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}

/*
 * Create a whiteout in @dir: a special char device node
 * (WHITEOUT_MODE / WHITEOUT_DEV) used with RENAME_WHITEOUT.
 */
int vfs_whiteout(struct inode *dir, struct dentry *dentry)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->mknod)
		return -EPERM;

	return dir->i_op->mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
}
EXPORT_SYMBOL(vfs_whiteout);

/*
 * Copy a kernel-side link body to a userspace buffer, truncating to
 * @buflen.  If @link is an ERR_PTR, its error code is returned instead.
 */
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
	int len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}

/**
 * vfs_readlink - copy symlink body into userspace buffer
 * @dentry: dentry on which to get symbolic link
 * @buffer: user memory pointer
 * @buflen: size of buffer
 *
 * Does not touch atime. That's up to the caller if necessary
 *
 * Does not call security hook.
*/ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct inode *inode = d_inode(dentry); DEFINE_DELAYED_CALL(done); const char *link; int res; if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) { if (unlikely(inode->i_op->readlink)) return inode->i_op->readlink(dentry, buffer, buflen); if (!d_is_symlink(dentry)) return -EINVAL; spin_lock(&inode->i_lock); inode->i_opflags |= IOP_DEFAULT_READLINK; spin_unlock(&inode->i_lock); } link = READ_ONCE(inode->i_link); if (!link) { link = inode->i_op->get_link(dentry, inode, &done); if (IS_ERR(link)) return PTR_ERR(link); } res = readlink_copy(buffer, buflen, link); do_delayed_call(&done); return res; } EXPORT_SYMBOL(vfs_readlink); /** * vfs_get_link - get symlink body * @dentry: dentry on which to get symbolic link * @done: caller needs to free returned data with this * * Calls security hook and i_op->get_link() on the supplied inode. * * It does not touch atime. That's up to the caller if necessary. * * Does not work on "special" symlinks like /proc/$$/fd/N */ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done) { const char *res = ERR_PTR(-EINVAL); struct inode *inode = d_inode(dentry); if (d_is_symlink(dentry)) { res = ERR_PTR(security_inode_readlink(dentry)); if (!res) res = inode->i_op->get_link(dentry, inode, done); } return res; } EXPORT_SYMBOL(vfs_get_link); /* get the link contents into pagecache */ const char *page_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { char *kaddr; struct page *page; struct address_space *mapping = inode->i_mapping; if (!dentry) { page = find_get_page(mapping, 0); if (!page) return ERR_PTR(-ECHILD); if (!PageUptodate(page)) { put_page(page); return ERR_PTR(-ECHILD); } } else { page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); kaddr = page_address(page); 
nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } EXPORT_SYMBOL(page_get_link); void page_put_link(void *arg) { put_page(arg); } EXPORT_SYMBOL(page_put_link); int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); int res = readlink_copy(buffer, buflen, page_get_link(dentry, d_inode(dentry), &done)); do_delayed_call(&done); return res; } EXPORT_SYMBOL(page_readlink); /* * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS */ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; unsigned int flags = 0; if (nofs) flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, flags, &page, &fsdata); if (err) goto fail; memcpy(page_address(page), symname, len-1); err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, page, fsdata); if (err < 0) goto fail; if (err < len-1) goto retry; mark_inode_dirty(inode); return 0; fail: return err; } EXPORT_SYMBOL(__page_symlink); int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, !mapping_gfp_constraint(inode->i_mapping, __GFP_FS)); } EXPORT_SYMBOL(page_symlink); const struct inode_operations page_symlink_inode_operations = { .get_link = page_get_link, }; EXPORT_SYMBOL(page_symlink_inode_operations);
/*
 * Dataset provenance markers left over from file concatenation
 * (non-code residue, preserved verbatim):
 *   ./CrossVul/dataset_final_sorted/CWE-416/c/good_4655_0
 *   crossvul-cpp_data_good_4218_0
 */
/* * ndpi_main.c * * Copyright (C) 2011-20 - ntop.org * * This file is part of nDPI, an open source deep packet inspection * library based on the OpenDPI and PACE technology by ipoque GmbH * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdlib.h> #include <errno.h> #include <sys/types.h> #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_UNKNOWN #include "ndpi_config.h" #include "ndpi_api.h" #include "ahocorasick.h" #include "libcache.h" #include <time.h> #ifndef WIN32 #include <unistd.h> #endif #if defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ #include <sys/endian.h> #endif #include "ndpi_content_match.c.inc" #include "third_party/include/ndpi_patricia.h" #include "third_party/include/ht_hash.h" #include "third_party/include/ndpi_md5.h" /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static int _ndpi_debug_callbacks = 0; /* #define MATCH_DEBUG 1 */ /* ****************************************** */ static void *(*_ndpi_flow_malloc)(size_t size); static void (*_ndpi_flow_free)(void *ptr); static void *(*_ndpi_malloc)(size_t size); static void (*_ndpi_free)(void *ptr); /* ****************************************** */ /* Forward */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char 
*_func, int _line); static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root); /* ****************************************** */ static inline uint8_t flow_is_proto(struct ndpi_flow_struct *flow, u_int16_t p) { return((flow->detected_protocol_stack[0] == p) || (flow->detected_protocol_stack[1] == p)); } /* ****************************************** */ void *ndpi_malloc(size_t size) { return(_ndpi_malloc ? _ndpi_malloc(size) : malloc(size)); } void *ndpi_flow_malloc(size_t size) { return(_ndpi_flow_malloc ? _ndpi_flow_malloc(size) : ndpi_malloc(size)); } /* ****************************************** */ void *ndpi_calloc(unsigned long count, size_t size) { size_t len = count * size; void *p = ndpi_malloc(len); if(p) memset(p, 0, len); return(p); } /* ****************************************** */ void ndpi_free(void *ptr) { if(_ndpi_free) _ndpi_free(ptr); else free(ptr); } /* ****************************************** */ void ndpi_flow_free(void *ptr) { if(_ndpi_flow_free) _ndpi_flow_free(ptr); else ndpi_free_flow((struct ndpi_flow_struct *) ptr); } /* ****************************************** */ void *ndpi_realloc(void *ptr, size_t old_size, size_t new_size) { void *ret = ndpi_malloc(new_size); if(!ret) return(ret); else { memcpy(ret, ptr, old_size); ndpi_free(ptr); return(ret); } } /* ****************************************** */ char *ndpi_strdup(const char *s) { if(s == NULL ){ return NULL; } int len = strlen(s); char *m = ndpi_malloc(len + 1); if(m) { memcpy(m, s, len); m[len] = '\0'; } return(m); } /* *********************************************************************************** */ /* Opaque structure defined here */ struct ndpi_ptree { patricia_tree_t *v4; patricia_tree_t *v6; }; /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_struct(void) { return(sizeof(struct ndpi_flow_struct)); } /* 
*********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_id_struct(void) { return(sizeof(struct ndpi_id_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_tcp_struct(void) { return(sizeof(struct ndpi_flow_tcp_struct)); } /* *********************************************************************************** */ u_int32_t ndpi_detection_get_sizeof_ndpi_flow_udp_struct(void) { return(sizeof(struct ndpi_flow_udp_struct)); } /* *********************************************************************************** */ char *ndpi_get_proto_by_id(struct ndpi_detection_module_struct *ndpi_str, u_int id) { return((id >= ndpi_str->ndpi_num_supported_protocols) ? NULL : ndpi_str->proto_defaults[id].protoName); } /* *********************************************************************************** */ u_int16_t ndpi_get_proto_by_name(struct ndpi_detection_module_struct *ndpi_str, const char *name) { u_int16_t i, num = ndpi_get_num_supported_protocols(ndpi_str); for (i = 0; i < num; i++) if(strcasecmp(ndpi_get_proto_by_id(ndpi_str, i), name) == 0) return(i); return(NDPI_PROTOCOL_UNKNOWN); } /* ************************************************************************************* */ #ifdef CODE_UNUSED ndpi_port_range *ndpi_build_default_ports_range(ndpi_port_range *ports, u_int16_t portA_low, u_int16_t portA_high, u_int16_t portB_low, u_int16_t portB_high, u_int16_t portC_low, u_int16_t portC_high, u_int16_t portD_low, u_int16_t portD_high, u_int16_t portE_low, u_int16_t portE_high) { int i = 0; ports[i].port_low = portA_low, ports[i].port_high = portA_high; i++; ports[i].port_low = portB_low, ports[i].port_high = portB_high; i++; ports[i].port_low = portC_low, ports[i].port_high = portC_high; i++; ports[i].port_low = portD_low, ports[i].port_high = portD_high; i++; ports[i].port_low = portE_low, ports[i].port_high = 
portE_high; return(ports); } #endif /* *********************************************************************************** */ ndpi_port_range *ndpi_build_default_ports(ndpi_port_range *ports, u_int16_t portA, u_int16_t portB, u_int16_t portC, u_int16_t portD, u_int16_t portE) { int i = 0; ports[i].port_low = portA, ports[i].port_high = portA; i++; ports[i].port_low = portB, ports[i].port_high = portB; i++; ports[i].port_low = portC, ports[i].port_high = portC; i++; ports[i].port_low = portD, ports[i].port_high = portD; i++; ports[i].port_low = portE, ports[i].port_high = portE; return(ports); } /* ********************************************************************************** */ void ndpi_set_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_breed_t breed) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoBreed = breed; } /* ********************************************************************************** */ void ndpi_set_proto_category(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId, ndpi_protocol_category_t protoCategory) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return; else ndpi_str->proto_defaults[protoId].protoCategory = protoCategory; } /* ********************************************************************************** */ /* There are some (master) protocols that are informative, meaning that it shows what is the subprotocol about, but also that the subprotocol isn't a real protocol. Example: - DNS is informative as if we see a DNS request for www.facebook.com, the returned protocol is DNS.Facebook, but Facebook isn't a real subprotocol but rather it indicates a query for Facebook and not Facebook traffic. - HTTP/SSL are NOT informative as SSL.Facebook (likely) means that this is SSL (HTTPS) traffic containg Facebook traffic. 
*/ u_int8_t ndpi_is_subprotocol_informative(struct ndpi_detection_module_struct *ndpi_str, u_int16_t protoId) { if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) return(0); switch (protoId) { /* All dissectors that have calls to ndpi_match_host_subprotocol() */ case NDPI_PROTOCOL_DNS: return(1); break; default: return(0); } } /* ********************************************************************************** */ void ndpi_exclude_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t protocol_id, const char *_file, const char *_func, int _line) { if(protocol_id < NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES if(ndpi_str && ndpi_str->ndpi_log_level >= NDPI_LOG_DEBUG && ndpi_str->ndpi_debug_printf != NULL) { (*(ndpi_str->ndpi_debug_printf))(protocol_id, ndpi_str, NDPI_LOG_DEBUG, _file, _func, _line, "exclude %s\n", ndpi_get_proto_name(ndpi_str, protocol_id)); } #endif NDPI_ADD_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, protocol_id); } } /* ********************************************************************************** */ void ndpi_set_proto_defaults(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed, u_int16_t protoId, u_int8_t can_have_a_subprotocol, u_int16_t tcp_master_protoId[2], u_int16_t udp_master_protoId[2], char *protoName, ndpi_protocol_category_t protoCategory, ndpi_port_range *tcpDefPorts, ndpi_port_range *udpDefPorts) { char *name; int j; if(protoId >= NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: INTERNAL ERROR\n", protoName, protoId); #endif return; } if(ndpi_str->proto_defaults[protoId].protoName != NULL) { #ifdef DEBUG NDPI_LOG_ERR(ndpi_str, "[NDPI] %s/protoId=%d: already initialized. 
Ignoring it\n", protoName, protoId); #endif return; } name = ndpi_strdup(protoName); if(ndpi_str->proto_defaults[protoId].protoName) ndpi_free(ndpi_str->proto_defaults[protoId].protoName); ndpi_str->proto_defaults[protoId].protoName = name, ndpi_str->proto_defaults[protoId].protoCategory = protoCategory, ndpi_str->proto_defaults[protoId].protoId = protoId, ndpi_str->proto_defaults[protoId].protoBreed = breed; ndpi_str->proto_defaults[protoId].can_have_a_subprotocol = can_have_a_subprotocol; memcpy(&ndpi_str->proto_defaults[protoId].master_tcp_protoId, tcp_master_protoId, 2 * sizeof(u_int16_t)); memcpy(&ndpi_str->proto_defaults[protoId].master_udp_protoId, udp_master_protoId, 2 * sizeof(u_int16_t)); for (j = 0; j < MAX_DEFAULT_PORTS; j++) { if(udpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &udpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->udpRoot, __FUNCTION__, __LINE__); if(tcpDefPorts[j].port_low != 0) addDefaultPort(ndpi_str, &tcpDefPorts[j], &ndpi_str->proto_defaults[protoId], 0, &ndpi_str->tcpRoot, __FUNCTION__, __LINE__); /* No port range, just the lower port */ ndpi_str->proto_defaults[protoId].tcp_default_ports[j] = tcpDefPorts[j].port_low; ndpi_str->proto_defaults[protoId].udp_default_ports[j] = udpDefPorts[j].port_low; } } /* ******************************************************************** */ static int ndpi_default_ports_tree_node_t_cmp(const void *a, const void *b) { ndpi_default_ports_tree_node_t *fa = (ndpi_default_ports_tree_node_t *) a; ndpi_default_ports_tree_node_t *fb = (ndpi_default_ports_tree_node_t *) b; //printf("[NDPI] %s(%d, %d)\n", __FUNCTION__, fa->default_port, fb->default_port); return((fa->default_port == fb->default_port) ? 0 : ((fa->default_port < fb->default_port) ? 
-1 : 1)); } /* ******************************************************************** */ void ndpi_default_ports_tree_node_t_walker(const void *node, const ndpi_VISIT which, const int depth) { ndpi_default_ports_tree_node_t *f = *(ndpi_default_ports_tree_node_t **) node; printf("<%d>Walk on node %s (%u)\n", depth, which == ndpi_preorder ? "ndpi_preorder" : which == ndpi_postorder ? "ndpi_postorder" : which == ndpi_endorder ? "ndpi_endorder" : which == ndpi_leaf ? "ndpi_leaf" : "unknown", f->default_port); } /* ******************************************************************** */ static void addDefaultPort(struct ndpi_detection_module_struct *ndpi_str, ndpi_port_range *range, ndpi_proto_defaults_t *def, u_int8_t customUserProto, ndpi_default_ports_tree_node_t **root, const char *_func, int _line) { u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { ndpi_default_ports_tree_node_t *node = (ndpi_default_ports_tree_node_t *) ndpi_malloc(sizeof(ndpi_default_ports_tree_node_t)); ndpi_default_ports_tree_node_t *ret; if(!node) { NDPI_LOG_ERR(ndpi_str, "%s:%d not enough memory\n", _func, _line); break; } node->proto = def, node->default_port = port, node->customUserProto = customUserProto; ret = (ndpi_default_ports_tree_node_t *) ndpi_tsearch(node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != node) { NDPI_LOG_DBG(ndpi_str, "[NDPI] %s:%d found duplicate for port %u: overwriting it with new value\n", _func, _line, port); ret->proto = def; ndpi_free(node); } } } /* ****************************************************** */ /* NOTE This function must be called with a semaphore set, this in order to avoid changing the datastructures while using them */ static int removeDefaultPort(ndpi_port_range *range, ndpi_proto_defaults_t *def, ndpi_default_ports_tree_node_t **root) { ndpi_default_ports_tree_node_t node; u_int16_t port; for (port = range->port_low; port <= range->port_high; port++) { 
ndpi_default_ports_tree_node_t *ret; node.proto = def, node.default_port = port; ret = (ndpi_default_ports_tree_node_t *) ndpi_tdelete( &node, (void *) root, ndpi_default_ports_tree_node_t_cmp); /* Add it to the tree */ if(ret != NULL) { ndpi_free((ndpi_default_ports_tree_node_t *) ret); return(0); } } return(-1); } /* ****************************************************** */ static int ndpi_string_to_automa(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *value, u_int16_t protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed, u_int8_t free_str_on_duplicate) { AC_PATTERN_t ac_pattern; AC_ERROR_t rc; if((value == NULL) || (protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS))) { NDPI_LOG_ERR(ndpi_str, "[NDPI] protoId=%d: INTERNAL ERROR\n", protocol_id); return(-1); } if(automa->ac_automa == NULL) return(-2); ac_pattern.astring = value, ac_pattern.rep.number = protocol_id, ac_pattern.rep.category = (u_int16_t) category, ac_pattern.rep.breed = (u_int16_t) breed; #ifdef MATCH_DEBUG printf("Adding to automa [%s][protocol_id: %u][category: %u][breed: %u]\n", value, protocol_id, category, breed); #endif if(value == NULL) ac_pattern.length = 0; else ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(((AC_AUTOMATA_t *) automa->ac_automa), &ac_pattern); if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) return(-2); if(rc == ACERR_DUPLICATE_PATTERN && free_str_on_duplicate) ndpi_free(value); return(0); } /* ****************************************************** */ static int ndpi_add_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *_value, int protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed) { int rv; char *value = ndpi_strdup(_value); if(!value) return(-1); #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] Adding [%s][%d]\n", value, protocol_id); #endif rv = ndpi_string_to_automa(ndpi_str, &ndpi_str->host_automa, value, protocol_id, 
category, breed, 1); if(rv != 0) ndpi_free(value); return(rv); } /* ****************************************************** */ #ifdef CODE_UNUSED int ndpi_add_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, int protocol_id, ndpi_protocol_category_t category, ndpi_protocol_breed_t breed) { return(ndpi_string_to_automa(ndpi_str, &ndpi_str->content_automa, value, protocol_id, category, breed, 0)); } #endif /* ****************************************************** */ /* NOTE This function must be called with a semaphore set, this in order to avoid changing the datastructures while using them */ static int ndpi_remove_host_url_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, int protocol_id) { NDPI_LOG_ERR(ndpi_str, "[NDPI] Missing implementation for proto %s/%d\n", value, protocol_id); return(-1); } /* ******************************************************************** */ void ndpi_init_protocol_match(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_match *match) { u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}; ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; if(ndpi_str->proto_defaults[match->protocol_id].protoName == NULL) { ndpi_str->proto_defaults[match->protocol_id].protoName = ndpi_strdup(match->proto_name); ndpi_str->proto_defaults[match->protocol_id].protoId = match->protocol_id; ndpi_str->proto_defaults[match->protocol_id].protoCategory = match->protocol_category; ndpi_str->proto_defaults[match->protocol_id].protoBreed = match->protocol_breed; ndpi_set_proto_defaults(ndpi_str, ndpi_str->proto_defaults[match->protocol_id].protoBreed, ndpi_str->proto_defaults[match->protocol_id].protoId, 0 /* can_have_a_subprotocol */, no_master, no_master, ndpi_str->proto_defaults[match->protocol_id].protoName, ndpi_str->proto_defaults[match->protocol_id].protoCategory, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, 
ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); } ndpi_add_host_url_subprotocol(ndpi_str, match->string_to_match, match->protocol_id, match->protocol_category, match->protocol_breed); } /* ******************************************************************** */ /* Self check function to be called onli for testing purposes */ void ndpi_self_check_host_match() { u_int32_t i, j; for (i = 0; host_match[i].string_to_match != NULL; i++) { for (j = 0; host_match[j].string_to_match != NULL; j++) { if((i != j) && (strcmp(host_match[i].string_to_match, host_match[j].string_to_match) == 0)) { printf("[INTERNAL ERROR]: Duplicate string detected '%s' [id: %u, id %u]\n", host_match[i].string_to_match, i, j); printf("\nPlease fix host_match[] in ndpi_content_match.c.inc\n"); exit(0); } } } } /* ******************************************************************** */ static void init_string_based_protocols(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; host_match[i].string_to_match != NULL; i++) ndpi_init_protocol_match(ndpi_str, &host_match[i]); ndpi_enable_loaded_categories(ndpi_str); #ifdef MATCH_DEBUG // ac_automata_display(ndpi_str->host_automa.ac_automa, 'n'); #endif #if 1 for (i = 0; ndpi_en_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa, (char *) ndpi_en_bigrams[i], 1, 1, 1, 0); #else for (i = 0; ndpi_en_popular_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->bigrams_automa, (char *) ndpi_en_popular_bigrams[i], 1, 1, 1, 0); #endif for (i = 0; ndpi_en_impossible_bigrams[i] != NULL; i++) ndpi_string_to_automa(ndpi_str, &ndpi_str->impossible_bigrams_automa, (char *) ndpi_en_impossible_bigrams[i], 1, 1, 1, 0); } /* ******************************************************************** */ int ndpi_set_detection_preferences(struct ndpi_detection_module_struct *ndpi_str, ndpi_detection_preference pref, int value) { switch (pref) { case ndpi_pref_direction_detect_disable: 
ndpi_str->direction_detect_disable = (u_int8_t) value; break; default: return(-1); } return(0); } /* ******************************************************************** */ static void ndpi_validate_protocol_initialization(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) { if(ndpi_str->proto_defaults[i].protoName == NULL) { NDPI_LOG_ERR(ndpi_str, "[NDPI] INTERNAL ERROR missing protoName initialization for [protoId=%d]: recovering\n", i); } else { if((i != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[i].protoCategory == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)) { NDPI_LOG_ERR(ndpi_str, "[NDPI] INTERNAL ERROR missing category [protoId=%d/%s] initialization: recovering\n", i, ndpi_str->proto_defaults[i].protoName ? ndpi_str->proto_defaults[i].protoName : "???"); } } } } /* ******************************************************************** */ /* This function is used to map protocol name and default ports and it MUST be updated whenever a new protocol is added to NDPI. Do NOT add web services (NDPI_SERVICE_xxx) here. 
*/ static void ndpi_init_protocol_defaults(struct ndpi_detection_module_struct *ndpi_str) { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}, custom_master[2]; /* Reset all settings */ memset(ndpi_str->proto_defaults, 0, sizeof(ndpi_str->proto_defaults)); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNRATED, NDPI_PROTOCOL_UNKNOWN, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unknown", NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_FTP_CONTROL, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_CONTROL", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 21, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FTP_DATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "FTP_DATA", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 20, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_POP, 0 /* can_have_a_subprotocol */, no_master, no_master, "POP3", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 110, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_POPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "POPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 995, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MAIL_SMTP, 0 /* can_have_a_subprotocol */, 
no_master, no_master, "SMTP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 25, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_SMTPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMTPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 465, 587, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MAIL_IMAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAP", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 143, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MAIL_IMAPS, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMAPS", NDPI_PROTOCOL_CATEGORY_MAIL, ndpi_build_default_ports(ports_a, 993, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "DNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 53, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 53, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IMO, 0 /* can_have_a_subprotocol */, no_master, no_master, "IMO", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 80, 0 /* ntop */, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MDNS, 1 /* can_have_a_subprotocol */, no_master, no_master, "MDNS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5353, 5354, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "NTP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 123, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETBIOS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetBIOS", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 139, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 137, 138, 139, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NFS, 0 /* can_have_a_subprotocol */, no_master, no_master, "NFS", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 2049, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2049, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSDP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 179, 2605, 0, 
0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SNMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SNMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 161, 162, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_XDMCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "XDMCP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 177, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 177, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_DANGEROUS, NDPI_PROTOCOL_SMBV1, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv1", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SYSLOG, 0 /* can_have_a_subprotocol */, no_master, no_master, "Syslog", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 514, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 514, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 67, 68, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_POSTGRES, 0 /* can_have_a_subprotocol */, no_master, no_master, "PostgreSQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 5432, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MYSQL, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "MySQL", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 3306, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECT_DOWNLOAD_LINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Direct_Download_Link", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_APPLEJUICE, 0 /* can_have_a_subprotocol */, no_master, no_master, "AppleJuice", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_DIRECTCONNECT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DirectConnect", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NATS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nats", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_NTOP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ntop", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VMWARE, 0 /* can_have_a_subprotocol */, no_master, no_master, "VMware", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, 
ndpi_build_default_ports(ports_a, 903, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 902, 903, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FBZERO, 0 /* can_have_a_subprotocol */, no_master, no_master, "FacebookZero", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_KONTIKI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kontiki", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_OPENFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "OpenFT", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_FASTTRACK, 0 /* can_have_a_subprotocol */, no_master, no_master, "FastTrack", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_GNUTELLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Gnutella", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_EDONKEY, 0 /* can_have_a_subprotocol */, no_master, no_master, "eDonkey", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_BITTORRENT, 0 /* can_have_a_subprotocol */, no_master, no_master, "BitTorrent", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 51413, 53646, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6771, 51413, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Skype", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKYPE_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "SkypeCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TIKTOK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TikTok", NDPI_PROTOCOL_CATEGORY_SOCIAL_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEREDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Teredo", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3544, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WECHAT, 0 /* can_have_a_subprotocol */, no_master, /* wechat.com */ no_master, "WeChat", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEMCACHED, 0 /* can_have_a_subprotocol */, no_master, no_master, "Memcached", 
NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 11211, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 11211, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMBV23, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMBv23", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 445, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_MINING, 0 /* can_have_a_subprotocol */, no_master, no_master, "Mining", CUSTOM_CATEGORY_MINING, ndpi_build_default_ports(ports_a, 8333, 30303, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NEST_LOG_SINK, 0 /* can_have_a_subprotocol */, no_master, no_master, "NestLogSink", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 11095, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MODBUS, 1 /* no subprotocol */, no_master, no_master, "Modbus", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 502, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_CALL, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppCall", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DATASAVER, 0 /* can_have_a_subprotocol */, no_master, no_master, "DataSaver", NDPI_PROTOCOL_CATEGORY_WEB /* dummy */, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) 
/* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SIGNAL, 0 /* can_have_a_subprotocol */, no_master, /* https://signal.org */ no_master, "Signal", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOH_DOT, 0 /* can_have_a_subprotocol */, no_master, no_master, "DoH_DoT", NDPI_PROTOCOL_CATEGORY_NETWORK /* dummy */, ndpi_build_default_ports(ports_a, 853, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_205, 0 /* can_have_a_subprotocol */, no_master, no_master, "FREE_205", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WIREGUARD, 0 /* can_have_a_subprotocol */, no_master, no_master, "WireGuard", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 51820, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_XBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Xbox", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 3074, 3076, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3074, 3076, 500, 3544, 4500) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PLAYSTATION, 0 /* can_have_a_subprotocol */, no_master, no_master, "Playstation", 
NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1935, 3478, 3479, 3480, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 3479, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQ", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_RTSP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTSP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 554, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 554, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ICECAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "IceCast", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPLive", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PPSTREAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPStream", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ZATTOO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zattoo", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SHOUTCAST, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "ShoutCast", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOPCAST, 0 /* can_have_a_subprotocol */, no_master, no_master, "Sopcast", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_58, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free58", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_TVUPLAYER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TVUplayer", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_DOWNLOAD, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Download", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_QQLIVE, 0 /* can_have_a_subprotocol */, no_master, no_master, "QQLive", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_THUNDER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Thunder", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_SOULSEEK, 0 /* can_have_a_subprotocol */, no_master, no_master, "Soulseek", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PS_VUE, 0 /* can_have_a_subprotocol */, no_master, no_master, "PS_VUE", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_IRC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IRC", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 194, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 194, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AYIYA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ayiya", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5072, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UNENCRYPTED_JABBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Unencrypted_Jabber", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE_69, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free69", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FREE_71, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free71", NDPI_PROTOCOL_CATEGORY_GAME, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_VRRP, 0 /* can_have_a_subprotocol */, no_master, no_master, "VRRP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STEAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Steam", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HALFLIFE2, 0 /* can_have_a_subprotocol */, no_master, no_master, "HalfLife2", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLDOFWARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfWarcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_HOTSPOT_SHIELD, 0 /* can_have_a_subprotocol */, no_master, no_master, "HotspotShield", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_UNSAFE, NDPI_PROTOCOL_TELNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telnet", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 23, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); custom_master[0] = NDPI_PROTOCOL_SIP, custom_master[1] = 
NDPI_PROTOCOL_H323; ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_STUN, 0 /* can_have_a_subprotocol */, no_master, custom_master, "STUN", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3478, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_IP_IPSEC, 0 /* can_have_a_subprotocol */, no_master, no_master, "IPsec", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 500, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 500, 4500, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_GRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "GRE", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IGMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IGMP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_EGP, 0 /* can_have_a_subprotocol */, no_master, no_master, "EGP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_SCTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SCTP", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_OSPF, 0 /* can_have_a_subprotocol */, no_master, no_master, "OSPF", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 2604, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_IP_IN_IP, 0 /* can_have_a_subprotocol */, no_master, no_master, "IP_in_IP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RDP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RDP", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 3389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 3389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "VNC", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_FREE90, 0 /* can_have_a_subprotocol */, no_master, no_master, "Free90", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5900, 5901, 5800, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZOOM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Zoom", NDPI_PROTOCOL_CATEGORY_VIDEO, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP_FILES, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsAppFiles", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHATSAPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "WhatsApp", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_TLS, 1 /* can_have_a_subprotocol */, no_master, no_master, "TLS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 443, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "SSH", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 22, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_USENET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Usenet", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MGCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "MGCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, 
ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IAX, 0 /* can_have_a_subprotocol */, no_master, no_master, "IAX", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 4569, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4569, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AFP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AFP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 548, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 548, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_HULU, 0 /* can_have_a_subprotocol */, no_master, no_master, "Hulu", NDPI_PROTOCOL_CATEGORY_STREAMING, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CHECKMK, 0 /* can_have_a_subprotocol */, no_master, no_master, "CHECKMK", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 6556, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_STEALTHNET, 0 /* can_have_a_subprotocol */, no_master, no_master, "Stealthnet", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_AIMINI, 0 /* can_have_a_subprotocol */, no_master, no_master, "Aimini", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SIP, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "SIP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 5060, 5061, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5060, 5061, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TRUPHONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "TruPhone", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IP_ICMPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "ICMPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DHCPV6, 0 /* can_have_a_subprotocol */, no_master, no_master, "DHCPV6", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_ARMAGETRON, 0 /* can_have_a_subprotocol */, no_master, no_master, "Armagetron", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CROSSFIRE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Crossfire", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_DOFUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dofus", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, 
NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FIESTA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Fiesta", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_FLORENSIA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Florensia", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_GUILDWARS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Guildwars", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_ACTIVESYNC, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_ActiveSync", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KERBEROS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Kerberos", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 88, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 88, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LDAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LDAP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 389, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 389, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MAPLESTORY, 0 /* can_have_a_subprotocol */, no_master, no_master, "MapleStory", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP 
*/, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MSSQL_TDS, 0 /* can_have_a_subprotocol */, no_master, no_master, "MsSQL-TDS", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 1433, 1434, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_PPTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "PPTP", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WARCRAFT3, 0 /* can_have_a_subprotocol */, no_master, no_master, "Warcraft3", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_WORLD_OF_KUNG_FU, 0 /* can_have_a_subprotocol */, no_master, no_master, "WorldOfKungFu", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DCERPC, 0 /* can_have_a_subprotocol */, no_master, no_master, "DCE_RPC", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 135, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NETFLOW, 0 /* can_have_a_subprotocol */, no_master, no_master, "NetFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2055, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SFLOW, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "sFlow", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6343, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_CONNECT, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Connect", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HTTP_PROXY, 1 /* can_have_a_subprotocol */, no_master, no_master, "HTTP_Proxy", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8080, 3128, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CITRIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Citrix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1494, 2598, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBEX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Webex", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RADIUS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Radius", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1812, 1813, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1812, 1813, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMVIEWER, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamViewer", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 5938, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5938, 0, 
0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LOTUS_NOTES, 0 /* can_have_a_subprotocol */, no_master, no_master, "LotusNotes", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 1352, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3201, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_GTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "GTP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 2152, 2123, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UPNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "UPnP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 1780, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 1900, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TELEGRAM, 0 /* can_have_a_subprotocol */, no_master, no_master, "Telegram", NDPI_PROTOCOL_CATEGORY_CHAT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_QUIC, 1 /* can_have_a_subprotocol */, no_master, no_master, "QUIC", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 443, 80, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DIAMETER, 0 /* can_have_a_subprotocol */, no_master, 
no_master, "Diameter", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 3868, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_APPLE_PUSH, 0 /* can_have_a_subprotocol */, no_master, no_master, "ApplePush", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DROPBOX, 0 /* can_have_a_subprotocol */, no_master, no_master, "Dropbox", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 17500, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SPOTIFY, 0 /* can_have_a_subprotocol */, no_master, no_master, "Spotify", NDPI_PROTOCOL_CATEGORY_MUSIC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MESSENGER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Messenger", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LISP, 0 /* can_have_a_subprotocol */, no_master, no_master, "LISP", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 4342, 4341, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_EAQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "EAQ", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 6000, 0, 0, 0, 0) /* UDP */); 
/* NOTE(review): this span is part of a very long protocol-defaults initializer.
 * Each call below registers one protocol with: a traffic classification
 * constant (e.g. NDPI_PROTOCOL_ACCEPTABLE / NDPI_PROTOCOL_FUN /
 * NDPI_PROTOCOL_POTENTIALLY_DANGEROUS), the protocol id, a flag
 * (can_have_a_subprotocol), two master-protocol lists, the display name,
 * a category, and the default TCP/UDP port lists (0 = no default port).
 * Exact argument semantics are defined by ndpi_set_proto_defaults()
 * elsewhere in this file — confirm against its declaration. */
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_KAKAOTALK_VOICE,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "KakaoTalk_Voice",
                        NDPI_PROTOCOL_CATEGORY_VOIP,
                        ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */);
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_MPEGTS,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "MPEG_TS",
                        NDPI_PROTOCOL_CATEGORY_MEDIA,
                        ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */);
/* http://en.wikipedia.org/wiki/Link-local_Multicast_Name_Resolution */
/* LLMNR uses port 5355 on both transports (RFC 4795). */
ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_LLMNR,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "LLMNR",
                        NDPI_PROTOCOL_CATEGORY_NETWORK,
                        ndpi_build_default_ports(ports_a, 5355, 0, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 5355, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */
ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_POTENTIALLY_DANGEROUS, NDPI_PROTOCOL_REMOTE_SCAN,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "RemoteScan",
                        NDPI_PROTOCOL_CATEGORY_NETWORK,
                        ndpi_build_default_ports(ports_a, 6077, 0, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 6078, 0, 0, 0, 0) /* UDP */); /* Missing dissector: port based only */
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_H323,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "H323",
                        NDPI_PROTOCOL_CATEGORY_VOIP,
                        ndpi_build_default_ports(ports_a, 1719, 1720, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 1719, 1720, 0, 0, 0) /* UDP */);
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_OPENVPN,
                        0 /* can_have_a_subprotocol */, no_master, no_master, "OpenVPN",
                        NDPI_PROTOCOL_CATEGORY_VPN,
                        ndpi_build_default_ports(ports_a, 1194, 0, 0, 0, 0) /* TCP */,
                        ndpi_build_default_ports(ports_b, 1194, 0, 0, 0, 0) /* UDP */);
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_NOE, 0 /* can_have_a_subprotocol */, no_master, no_master, "NOE", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CISCOVPN, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoVPN", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 10000, 8008, 8009, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 10000, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TEAMSPEAK, 0 /* can_have_a_subprotocol */, no_master, no_master, "TeamSpeak", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SKINNY, 0 /* can_have_a_subprotocol */, no_master, no_master, "CiscoSkinny", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 2000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RTCP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTCP", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RSYNC, 0 /* can_have_a_subprotocol */, no_master, no_master, "RSYNC", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 873, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ORACLE, 0 /* can_have_a_subprotocol */, no_master, no_master, "Oracle", NDPI_PROTOCOL_CATEGORY_DATABASE, 
ndpi_build_default_ports(ports_a, 1521, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CORBA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Corba", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_UBUNTUONE, 0 /* can_have_a_subprotocol */, no_master, no_master, "UbuntuONE", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WHOIS_DAS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Whois-DAS", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 43, 4343, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_COLLECTD, 0 /* can_have_a_subprotocol */, no_master, no_master, "Collectd", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 25826, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOCKS, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOCKS", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 1080, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1080, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TFTP, 0 /* can_have_a_subprotocol */, no_master, no_master, "TFTP", NDPI_PROTOCOL_CATEGORY_DATA_TRANSFER, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 69, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, 
NDPI_PROTOCOL_RTMP, 0 /* can_have_a_subprotocol */, no_master, no_master, "RTMP", NDPI_PROTOCOL_CATEGORY_MEDIA, ndpi_build_default_ports(ports_a, 1935, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_PANDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Pando_Media_Booster", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MEGACO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Megaco", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 2944, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_REDIS, 0 /* can_have_a_subprotocol */, no_master, no_master, "Redis", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 6379, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZMQ, 0 /* can_have_a_subprotocol */, no_master, no_master, "ZeroMQ", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_VHUA, 0 /* can_have_a_subprotocol */, no_master, no_master, "VHUA", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 58267, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_STARCRAFT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Starcraft", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 1119, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 1119, 0, 0, 
0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_UBNTAC2, 0 /* can_have_a_subprotocol */, no_master, no_master, "UBNTAC2", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 10001, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_VIBER, 0 /* can_have_a_subprotocol */, no_master, no_master, "Viber", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 7985, 5242, 5243, 4244, 0), /* TCP */ ndpi_build_default_ports(ports_b, 7985, 7987, 5242, 5243, 4244)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_COAP, 0 /* can_have_a_subprotocol */, no_master, no_master, "COAP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 5683, 5684, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_MQTT, 0 /* can_have_a_subprotocol */, no_master, no_master, "MQTT", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 1883, 8883, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SOMEIP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SOMEIP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 30491, 30501, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 30491, 30501, 30490, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_RX, 0 /* can_have_a_subprotocol */, no_master, no_master, "RX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_GIT, 0 /* can_have_a_subprotocol */, no_master, no_master, "Git", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, 
ndpi_build_default_ports(ports_a, 9418, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DRDA, 0 /* can_have_a_subprotocol */, no_master, no_master, "DRDA", NDPI_PROTOCOL_CATEGORY_DATABASE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_HANGOUT_DUO, 0 /* can_have_a_subprotocol */, no_master, no_master, "GoogleHangoutDuo", NDPI_PROTOCOL_CATEGORY_VOIP, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BJNP, 0 /* can_have_a_subprotocol */, no_master, no_master, "BJNP", NDPI_PROTOCOL_CATEGORY_SYSTEM_OS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 8612, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_SMPP, 0 /* can_have_a_subprotocol */, no_master, no_master, "SMPP", NDPI_PROTOCOL_CATEGORY_DOWNLOAD_FT, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_OOKLA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Ookla", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMQP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AMQP", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_DNSCRYPT, 0 /* 
can_have_a_subprotocol */, no_master, no_master, "DNScrypt", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0), /* TCP */ ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0)); /* UDP */ ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TINC, 0 /* can_have_a_subprotocol */, no_master, no_master, "TINC", NDPI_PROTOCOL_CATEGORY_VPN, ndpi_build_default_ports(ports_a, 655, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 655, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_FIX, 0 /* can_have_a_subprotocol */, no_master, no_master, "FIX", NDPI_PROTOCOL_CATEGORY_RPC, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_NINTENDO, 0 /* can_have_a_subprotocol */, no_master, no_master, "Nintendo", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_FUN, NDPI_PROTOCOL_CSGO, 0 /* can_have_a_subprotocol */, no_master, no_master, "CSGO", NDPI_PROTOCOL_CATEGORY_GAME, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AJP, 0 /* can_have_a_subprotocol */, no_master, no_master, "AJP", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 8009, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_TARGUS_GETDATA, 0 /* can_have_a_subprotocol */, no_master, no_master, "Targus Dataspeed", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 5001, 5201, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5001, 5201, 0, 0, 0) /* UDP */); 
ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_AMAZON_VIDEO, 0 /* can_have_a_subprotocol */, no_master, no_master, "AmazonVideo", NDPI_PROTOCOL_CATEGORY_CLOUD, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_DNP3, 1 /* no subprotocol */, no_master, no_master, "DNP3", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 20000, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_IEC60870, 1 /* no subprotocol */, no_master, no_master, "IEC60870", NDPI_PROTOCOL_CATEGORY_NETWORK, /* Perhaps IoT in the future */ ndpi_build_default_ports(ports_a, 2404, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_BLOOMBERG, 1 /* no subprotocol */, no_master, no_master, "Bloomberg", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_CAPWAP, 1 /* no subprotocol */, no_master, no_master, "CAPWAP", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 5246, 5247, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ZABBIX, 1 /* no subprotocol */, no_master, no_master, "Zabbix", NDPI_PROTOCOL_CATEGORY_NETWORK, ndpi_build_default_ports(ports_a, 10050, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_S7COMM, 1 /* no subprotocol */, no_master, no_master, "s7comm", NDPI_PROTOCOL_CATEGORY_NETWORK, 
ndpi_build_default_ports(ports_a, 102, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_SAFE, NDPI_PROTOCOL_MSTEAMS, 1 /* no subprotocol */, no_master, no_master, "Teams", NDPI_PROTOCOL_CATEGORY_COLLABORATIVE, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */ ); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_WEBSOCKET, 1 /* can_have_a_subprotocol */, no_master, no_master, "WebSocket", NDPI_PROTOCOL_CATEGORY_WEB, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); ndpi_set_proto_defaults(ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, NDPI_PROTOCOL_ANYDESK, 1 /* no subprotocol */, no_master, no_master, "AnyDesk", NDPI_PROTOCOL_CATEGORY_REMOTE_ACCESS, ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_main.c" #endif /* calling function for host and content matched protocols */ init_string_based_protocols(ndpi_str); ndpi_validate_protocol_initialization(ndpi_str); } /* ****************************************************** */ #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/custom_ndpi_protocols.c" #endif /* ****************************************************** */ static int ac_match_handler(AC_MATCH_t *m, AC_TEXT_t *txt, AC_REP_t *match) { int min_len = (txt->length < m->patterns->length) ? txt->length : m->patterns->length; char buf[64] = {'\0'}, *whatfound; int min_buf_len = (txt->length > 63 /* sizeof(buf)-1 */) ? 
63 : txt->length; u_int buf_len = strlen(buf); strncpy(buf, txt->astring, min_buf_len); buf[min_buf_len] = '\0'; #ifdef MATCH_DEBUG printf("Searching [to search: %s/%u][pattern: %s/%u] [len: %d][match_num: %u][%s]\n", buf, (unigned int) txt->length, m->patterns->astring, (unigned int) m->patterns->length, min_len, m->match_num, m->patterns->astring); #endif whatfound = strstr(buf, m->patterns->astring); #ifdef MATCH_DEBUG printf("[NDPI] %s() [searching=%s][pattern=%s][%s][%c]\n", __FUNCTION__, buf, m->patterns->astring, whatfound ? whatfound : "<NULL>", whatfound[-1]); #endif if(whatfound) { /* The patch below allows in case of pattern ws.amazon.com to avoid matching aws.amazon.com whereas a.ws.amazon.com has to match */ if((whatfound != buf) && (m->patterns->astring[0] != '.') /* The searched pattern does not start with . */ && strchr(m->patterns->astring, '.') /* The matched pattern has a . (e.g. numeric or sym IPs) */) { int len = strlen(m->patterns->astring); if((whatfound[-1] != '.') || ((m->patterns->astring[len - 1] != '.') && (whatfound[len] != '\0') /* endsWith does not hold here */)) { return(0); } else { memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); /* Partial match? */ return(0); /* Keep searching as probably there is a better match */ } } } /* Return 1 for stopping to the first match. We might consider searching for the more specific match, paying more cpu cycles. 
*/ memcpy(match, &m->patterns[0].rep, sizeof(AC_REP_t)); if(((buf_len >= min_len) && (strncmp(&buf[buf_len - min_len], m->patterns->astring, min_len) == 0)) || (strncmp(buf, m->patterns->astring, min_len) == 0) /* begins with */ ) { #ifdef MATCH_DEBUG printf("Found match [%s][%s] [len: %d]" // "[proto_id: %u]" "\n", buf, m->patterns->astring, min_len /* , *matching_protocol_id */); #endif return(1); /* If the pattern found matches the string at the beginning we stop here */ } else { #ifdef MATCH_DEBUG printf("NO match found: continue\n"); #endif return(0); /* 0 to continue searching, !0 to stop */ } } /* ******************************************************************** */ static int fill_prefix_v4(prefix_t *p, const struct in_addr *a, int b, int mb) { if(b < 0 || b > mb) return(-1); memset(p, 0, sizeof(prefix_t)); memcpy(&p->add.sin, a, (mb + 7) / 8); p->family = AF_INET; p->bitlen = b; p->ref_count = 0; return(0); } /* ******************************************* */ static int fill_prefix_v6(prefix_t *prefix, const struct in6_addr *addr, int bits, int maxbits) { #ifdef PATRICIA_IPV6 if(bits < 0 || bits > maxbits) return -1; memcpy(&prefix->add.sin6, addr, (maxbits + 7) / 8); prefix->family = AF_INET6, prefix->bitlen = bits, prefix->ref_count = 0; return 0; #else return(-1); #endif } /* ******************************************* */ u_int16_t ndpi_network_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin /* network byte order */) { prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix); return(node ? 
node->value.uv.user_value : NDPI_PROTOCOL_UNKNOWN); } /* ******************************************* */ u_int16_t ndpi_network_port_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin /* network byte order */, u_int16_t port /* network byte order */) { prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->protocols_ptree, &prefix); if(node) { if((node->value.uv.additional_user_value == 0) || (node->value.uv.additional_user_value == port)) return(node->value.uv.user_value); } return(NDPI_PROTOCOL_UNKNOWN); } /* ******************************************* */ #if 0 static u_int8_t tor_ptree_match(struct ndpi_detection_module_struct *ndpi_str, struct in_addr *pin) { return((ndpi_network_ptree_match(ndpi_str, pin) == NDPI_PROTOCOL_TOR) ? 1 : 0); } #endif /* ******************************************* */ u_int8_t ndpi_is_tor_flow(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; if(packet->tcp != NULL) { if(packet->iph) { if(flow->guessed_host_protocol_id == NDPI_PROTOCOL_TOR) return(1); } } return(0); } /* ******************************************* */ static patricia_node_t *add_to_ptree(patricia_tree_t *tree, int family, void *addr, int bits) { prefix_t prefix; patricia_node_t *node; fill_prefix_v4(&prefix, (struct in_addr *) addr, bits, tree->maxbits); node = ndpi_patricia_lookup(tree, &prefix); if(node) memset(&node->value, 0, sizeof(node->value)); return(node); } /* ******************************************* */ /* Load a file containing IPv4 addresses in CIDR format as 'protocol_id' Return: the number of entries loaded or -1 in case of error */ int ndpi_load_ipv4_ptree(struct ndpi_detection_module_struct *ndpi_str, const char *path, u_int16_t protocol_id) { char 
buffer[128], *line, *addr, *cidr, *saveptr; FILE *fd; int len; u_int num_loaded = 0; fd = fopen(path, "r"); if(fd == NULL) { NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); return(-1); } while (1) { line = fgets(buffer, sizeof(buffer), fd); if(line == NULL) break; len = strlen(line); if((len <= 1) || (line[0] == '#')) continue; line[len - 1] = '\0'; addr = strtok_r(line, "/", &saveptr); if(addr) { struct in_addr pin; patricia_node_t *node; cidr = strtok_r(NULL, "\n", &saveptr); pin.s_addr = inet_addr(addr); if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, cidr ? atoi(cidr) : 32 /* bits */)) != NULL) { node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = 0 /* port */; num_loaded++; } } } fclose(fd); return(num_loaded); } /* ******************************************* */ static void ndpi_init_ptree_ipv4(struct ndpi_detection_module_struct *ndpi_str, void *ptree, ndpi_network host_list[], u_int8_t skip_tor_hosts) { int i; for (i = 0; host_list[i].network != 0x0; i++) { struct in_addr pin; patricia_node_t *node; if(skip_tor_hosts && (host_list[i].value == NDPI_PROTOCOL_TOR)) continue; pin.s_addr = htonl(host_list[i].network); if((node = add_to_ptree(ptree, AF_INET, &pin, host_list[i].cidr /* bits */)) != NULL) { node->value.uv.user_value = host_list[i].value, node->value.uv.additional_user_value = 0; } } } /* ******************************************* */ static int ndpi_add_host_ip_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *value, u_int16_t protocol_id) { patricia_node_t *node; struct in_addr pin; int bits = 32; char *ptr = strrchr(value, '/'); u_int16_t port = 0; /* Format ip:8.248.73.247:443 */ char *double_column; if(ptr) { ptr[0] = '\0'; ptr++; if((double_column = strrchr(ptr, ':')) != NULL) { double_column[0] = '\0'; port = atoi(&double_column[1]); } if(atoi(ptr) >= 0 && atoi(ptr) <= 32) bits = atoi(ptr); } else { /* Let's check if there is the port defined Example: 
ip:8.248.73.247:443@AmazonPrime */ double_column = strrchr(value, ':'); if(double_column) { double_column[0] = '\0'; port = atoi(&double_column[1]); } } inet_pton(AF_INET, value, &pin); if((node = add_to_ptree(ndpi_str->protocols_ptree, AF_INET, &pin, bits)) != NULL) { node->value.uv.user_value = protocol_id, node->value.uv.additional_user_value = htons(port); } return(0); } void set_ndpi_malloc(void *(*__ndpi_malloc)(size_t size)) { _ndpi_malloc = __ndpi_malloc; } void set_ndpi_flow_malloc(void *(*__ndpi_flow_malloc)(size_t size)) { _ndpi_flow_malloc = __ndpi_flow_malloc; } void set_ndpi_free(void (*__ndpi_free)(void *ptr)) { _ndpi_free = __ndpi_free; } void set_ndpi_flow_free(void (*__ndpi_flow_free)(void *ptr)) { _ndpi_flow_free = __ndpi_flow_free; } void ndpi_debug_printf(unsigned int proto, struct ndpi_detection_module_struct *ndpi_str, ndpi_log_level_t log_level, const char *file_name, const char *func_name, int line_number, const char *format, ...) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES va_list args; #define MAX_STR_LEN 250 char str[MAX_STR_LEN]; if(ndpi_str != NULL && log_level > NDPI_LOG_ERROR && proto > 0 && proto < NDPI_MAX_SUPPORTED_PROTOCOLS && !NDPI_ISSET(&ndpi_str->debug_bitmask, proto)) return; va_start(args, format); vsnprintf(str, sizeof(str) - 1, format, args); va_end(args); if(ndpi_str != NULL) { printf("%s:%s:%-3d - [%s]: %s", file_name, func_name, line_number, ndpi_get_proto_name(ndpi_str, proto), str); } else { printf("Proto: %u, %s", proto, str); } #endif } void set_ndpi_debug_function(struct ndpi_detection_module_struct *ndpi_str, ndpi_debug_function_ptr ndpi_debug_printf) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES ndpi_str->ndpi_debug_printf = ndpi_debug_printf; #endif } /* ****************************************** */ /* Keep it in order and in sync with ndpi_protocol_category_t in ndpi_typedefs.h */ static const char *categories[] = { "Unspecified", "Media", "VPN", "Email", "DataTransfer", "Web", "SocialNetwork", 
"Download-FileTransfer-FileSharing", "Game", "Chat", "VoIP", "Database", "RemoteAccess", "Cloud", "Network", "Collaborative", "RPC", "Streaming", "System", "SoftwareUpdate", "", "", "", "", "", "Music", "Video", "Shopping", "Productivity", "FileSharing", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "Mining", /* 99 */ "Malware", "Advertisement", "Banned_Site", "Site_Unavailable", "Allowed_Site", "Antimalware", }; /* ******************************************************************** */ struct ndpi_detection_module_struct *ndpi_init_detection_module(ndpi_init_prefs prefs) { struct ndpi_detection_module_struct *ndpi_str = ndpi_malloc(sizeof(struct ndpi_detection_module_struct)); int i; if(ndpi_str == NULL) { #ifdef NDPI_ENABLE_DEBUG_MESSAGES NDPI_LOG_ERR(ndpi_str, "ndpi_init_detection_module initial malloc failed for ndpi_str\n"); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ return(NULL); } memset(ndpi_str, 0, sizeof(struct ndpi_detection_module_struct)); #ifdef NDPI_ENABLE_DEBUG_MESSAGES set_ndpi_debug_function(ndpi_str, (ndpi_debug_function_ptr) ndpi_debug_printf); #endif /* NDPI_ENABLE_DEBUG_MESSAGES */ if((ndpi_str->protocols_ptree = ndpi_New_Patricia(32 /* IPv4 */)) != NULL) ndpi_init_ptree_ipv4(ndpi_str, ndpi_str->protocols_ptree, host_protocol_list, prefs & ndpi_dont_load_tor_hosts); NDPI_BITMASK_RESET(ndpi_str->detection_bitmask); #ifdef NDPI_ENABLE_DEBUG_MESSAGES ndpi_str->user_data = NULL; #endif ndpi_str->ticks_per_second = 1000; /* ndpi_str->ticks_per_second */ ndpi_str->tcp_max_retransmission_window_size = NDPI_DEFAULT_MAX_TCP_RETRANSMISSION_WINDOW_SIZE; ndpi_str->directconnect_connection_ip_tick_timeout = NDPI_DIRECTCONNECT_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->rtsp_connection_timeout = 
NDPI_RTSP_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->irc_timeout = NDPI_IRC_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->gnutella_timeout = NDPI_GNUTELLA_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->thunder_timeout = NDPI_THUNDER_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->zattoo_connection_timeout = NDPI_ZATTOO_CONNECTION_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->jabber_stun_timeout = NDPI_JABBER_STUN_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->jabber_file_transfer_timeout = NDPI_JABBER_FT_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->soulseek_connection_ip_tick_timeout = NDPI_SOULSEEK_CONNECTION_IP_TICK_TIMEOUT * ndpi_str->ticks_per_second; ndpi_str->ndpi_num_supported_protocols = NDPI_MAX_SUPPORTED_PROTOCOLS; ndpi_str->ndpi_num_custom_protocols = 0; ndpi_str->host_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->content_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->bigrams_automa.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->impossible_bigrams_automa.ac_automa = ac_automata_init(ac_match_handler); if((sizeof(categories) / sizeof(char *)) != NDPI_PROTOCOL_NUM_CATEGORIES) { NDPI_LOG_ERR(ndpi_str, "[NDPI] invalid categories length: expected %u, got %u\n", NDPI_PROTOCOL_NUM_CATEGORIES, (unsigned int) (sizeof(categories) / sizeof(char *))); return(NULL); } ndpi_str->custom_categories.hostnames.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler); ndpi_str->custom_categories.ipAddresses = ndpi_New_Patricia(32 /* IPv4 */); ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */); if((ndpi_str->custom_categories.ipAddresses == NULL) || (ndpi_str->custom_categories.ipAddresses_shadow == NULL)) return(NULL); ndpi_init_protocol_defaults(ndpi_str); for (i = 0; i < NUM_CUSTOM_CATEGORIES; i++) snprintf(ndpi_str->custom_category_labels[i], 
CUSTOM_CATEGORY_LABEL_LEN, "User custom category %u", (unsigned int) (i + 1)); return(ndpi_str); } /* *********************************************** */ void ndpi_finalize_initalization(struct ndpi_detection_module_struct *ndpi_str) { u_int i; for (i = 0; i < 4; i++) { ndpi_automa *automa; switch (i) { case 0: automa = &ndpi_str->host_automa; break; case 1: automa = &ndpi_str->content_automa; break; case 2: automa = &ndpi_str->bigrams_automa; break; case 3: automa = &ndpi_str->impossible_bigrams_automa; break; default: automa = NULL; break; } if(automa) { ac_automata_finalize((AC_AUTOMATA_t *) automa->ac_automa); automa->ac_automa_finalized = 1; } } } /* *********************************************** */ /* Wrappers */ void *ndpi_init_automa(void) { return(ac_automata_init(ac_match_handler)); } /* ****************************************************** */ int ndpi_add_string_value_to_automa(void *_automa, char *str, u_int32_t num) { AC_PATTERN_t ac_pattern; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_ERROR_t rc; if(automa == NULL) return(-1); memset(&ac_pattern, 0, sizeof(ac_pattern)); ac_pattern.astring = str; ac_pattern.rep.number = num; ac_pattern.length = strlen(ac_pattern.astring); rc = ac_automata_add(automa, &ac_pattern); return(rc == ACERR_SUCCESS || rc == ACERR_DUPLICATE_PATTERN ? 
0 : -1); } /* ****************************************************** */ int ndpi_add_string_to_automa(void *_automa, char *str) { return(ndpi_add_string_value_to_automa(_automa, str, 1)); } /* ****************************************************** */ void ndpi_free_automa(void *_automa) { ac_automata_release((AC_AUTOMATA_t *) _automa, 0); } /* ****************************************************** */ void ndpi_finalize_automa(void *_automa) { ac_automata_finalize((AC_AUTOMATA_t *) _automa); } /* ****************************************************** */ int ndpi_match_string(void *_automa, char *string_to_match) { AC_REP_t match = { NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; int rc; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = strlen(string_to_match); rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; return(rc ? 
match.number : 0); } /* ****************************************************** */ int ndpi_match_string_protocol_id(void *_automa, char *string_to_match, u_int match_len, u_int16_t *protocol_id, ndpi_protocol_category_t *category, ndpi_protocol_breed_t *breed) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *protocol_id = (u_int16_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *protocol_id = (u_int16_t)match.number, *category = match.category, *breed = match.breed; else *protocol_id = NDPI_PROTOCOL_UNKNOWN; return((*protocol_id != NDPI_PROTOCOL_UNKNOWN) ? 0 : -1); } /* ****************************************************** */ int ndpi_match_string_value(void *_automa, char *string_to_match, u_int match_len, u_int32_t *num) { AC_TEXT_t ac_input_text; AC_AUTOMATA_t *automa = (AC_AUTOMATA_t *) _automa; AC_REP_t match = { 0, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED }; int rc; *num = (u_int32_t)-1; if((automa == NULL) || (string_to_match == NULL) || (string_to_match[0] == '\0')) return(-2); ac_input_text.astring = string_to_match, ac_input_text.length = match_len; rc = ac_automata_search(automa, &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; if(rc) *num = match.number; else *num = 0; return(rc ? 
0 : -1); } /* *********************************************** */ int ndpi_match_custom_category(struct ndpi_detection_module_struct *ndpi_str, char *name, u_int name_len, ndpi_protocol_category_t *category) { ndpi_protocol_breed_t breed; u_int16_t id; int rc = ndpi_match_string_protocol_id(ndpi_str->custom_categories.hostnames.ac_automa, name, name_len, &id, category, &breed); return(rc); } /* *********************************************** */ int ndpi_get_custom_category_match(struct ndpi_detection_module_struct *ndpi_str, char *name_or_ip, u_int name_len, ndpi_protocol_category_t *id) { char ipbuf[64], *ptr; struct in_addr pin; u_int cp_len = ndpi_min(sizeof(ipbuf) - 1, name_len); if(!ndpi_str->custom_categories.categories_loaded) return(-1); if(cp_len > 0) { memcpy(ipbuf, name_or_ip, cp_len); ipbuf[cp_len] = '\0'; } else ipbuf[0] = '\0'; ptr = strrchr(ipbuf, '/'); if(ptr) ptr[0] = '\0'; if(inet_pton(AF_INET, ipbuf, &pin) == 1) { /* Search IP */ prefix_t prefix; patricia_node_t *node; /* Make sure all in network byte order otherwise compares wont work */ fill_prefix_v4(&prefix, &pin, 32, ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits); node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix); if(node) { *id = node->value.uv.user_value; return(0); } return(-1); } else { /* Search Host */ return(ndpi_match_custom_category(ndpi_str, name_or_ip, name_len, id)); } } /* *********************************************** */ static void free_ptree_data(void *data) { ; } /* ****************************************************** */ void ndpi_exit_detection_module(struct ndpi_detection_module_struct *ndpi_str) { if(ndpi_str != NULL) { int i; for (i = 0; i < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS); i++) { if(ndpi_str->proto_defaults[i].protoName) ndpi_free(ndpi_str->proto_defaults[i].protoName); } /* NDPI_PROTOCOL_TINC */ if(ndpi_str->tinc_cache) cache_free((cache_t)(ndpi_str->tinc_cache)); if(ndpi_str->ookla_cache) 
ndpi_lru_free_cache(ndpi_str->ookla_cache); if(ndpi_str->stun_cache) ndpi_lru_free_cache(ndpi_str->stun_cache); if(ndpi_str->msteams_cache) ndpi_lru_free_cache(ndpi_str->msteams_cache); if(ndpi_str->protocols_ptree) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->protocols_ptree, free_ptree_data); if(ndpi_str->udpRoot != NULL) ndpi_tdestroy(ndpi_str->udpRoot, ndpi_free); if(ndpi_str->tcpRoot != NULL) ndpi_tdestroy(ndpi_str->tcpRoot, ndpi_free); if(ndpi_str->host_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->host_automa.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->content_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->content_automa.ac_automa, 0); if(ndpi_str->bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->bigrams_automa.ac_automa, 0); if(ndpi_str->impossible_bigrams_automa.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->impossible_bigrams_automa.ac_automa, 0); if(ndpi_str->custom_categories.hostnames.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.hostnames_shadow.ac_automa != NULL) ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa, 1 /* free patterns strings memory */); if(ndpi_str->custom_categories.ipAddresses != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data); if(ndpi_str->custom_categories.ipAddresses_shadow != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses_shadow, free_ptree_data); #ifdef CUSTOM_NDPI_PROTOCOLS #include "../../../nDPI-custom/ndpi_exit_detection_module.c" #endif ndpi_free(ndpi_str); } } /* ****************************************************** */ int ndpi_get_protocol_id_master_proto(struct ndpi_detection_module_struct *ndpi_str, u_int16_t 
protocol_id, u_int16_t **tcp_master_proto, u_int16_t **udp_master_proto) {
  /* guard: ids beyond the static + custom protocol table map to UNKNOWN */
  if(protocol_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) {
    *tcp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_tcp_protoId,
    *udp_master_proto = ndpi_str->proto_defaults[NDPI_PROTOCOL_UNKNOWN].master_udp_protoId;
    return(-1);
  }

  *tcp_master_proto = ndpi_str->proto_defaults[protocol_id].master_tcp_protoId,
  *udp_master_proto = ndpi_str->proto_defaults[protocol_id].master_udp_protoId;

  return(0);
}

/* ****************************************************** */

/*
 * Look up a default-port tree node for a TCP or UDP flow. The lower of the
 * two ports is tried first (heuristically the server port); if it is not a
 * registered default port the higher port is tried. Returns NULL when neither
 * port is registered or when either port is 0.
 */
static ndpi_default_ports_tree_node_t *ndpi_get_guessed_protocol_id(struct ndpi_detection_module_struct *ndpi_str,
								    u_int8_t proto, u_int16_t sport, u_int16_t dport) {
  ndpi_default_ports_tree_node_t node;

  if(sport && dport) {
    int low = ndpi_min(sport, dport);
    int high = ndpi_max(sport, dport);
    const void *ret;

    node.default_port = low; /* Check server port first */
    ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ? (void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot,
		     ndpi_default_ports_tree_node_t_cmp);

    if(ret == NULL) {
      node.default_port = high;
      ret = ndpi_tfind(&node, (proto == IPPROTO_TCP) ?
(void *) &ndpi_str->tcpRoot : (void *) &ndpi_str->udpRoot, ndpi_default_ports_tree_node_t_cmp);
    }

    if(ret)
      return(*(ndpi_default_ports_tree_node_t **) ret);
  }

  return(NULL);
}

/* ****************************************************** */

/*
  These are UDP protocols that must fit a single packet
  and thus that if have NOT been detected they cannot be guessed
  as they have been excluded
*/
u_int8_t is_udp_guessable_protocol(u_int16_t l7_guessed_proto) {
  switch (l7_guessed_proto) {
  case NDPI_PROTOCOL_QUIC:
  case NDPI_PROTOCOL_SNMP:
  case NDPI_PROTOCOL_NETFLOW:
    /* TODO: add more protocols (if any missing) */
    return(1);
  }

  return(0);
}

/* ****************************************************** */

/*
 * Port/IP-protocol based guess. For TCP/UDP (both ports non-zero) the default
 * port trees are consulted; a guess already excluded by the DPI engine is
 * suppressed for single-packet UDP protocols (see is_udp_guessable_protocol).
 * For non-TCP/UDP traffic the IP protocol number is mapped directly.
 * *user_defined_proto is set to 1 only for custom user-defined port rules.
 */
u_int16_t ndpi_guess_protocol_id(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				 u_int8_t proto, u_int16_t sport, u_int16_t dport, u_int8_t *user_defined_proto) {
  *user_defined_proto = 0; /* Default */

  if(sport && dport) {
    ndpi_default_ports_tree_node_t *found = ndpi_get_guessed_protocol_id(ndpi_str, proto, sport, dport);

    if(found != NULL) {
      u_int16_t guessed_proto = found->proto->protoId;

      /* We need to check if the guessed protocol isn't excluded by nDPI */
      if(flow && (proto == IPPROTO_UDP) &&
	 NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, guessed_proto) &&
	 is_udp_guessable_protocol(guessed_proto))
	return(NDPI_PROTOCOL_UNKNOWN);
      else {
	*user_defined_proto = found->customUserProto;
	return(guessed_proto);
      }
    }
  } else {
    /* No TCP/UDP */
    switch (proto) {
    case NDPI_IPSEC_PROTOCOL_ESP:
    case NDPI_IPSEC_PROTOCOL_AH:
      return(NDPI_PROTOCOL_IP_IPSEC);
      break;
    case NDPI_GRE_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_GRE);
      break;
    case NDPI_ICMP_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_ICMP);
      break;
    case NDPI_IGMP_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_IGMP);
      break;
    case NDPI_EGP_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_EGP);
      break;
    case NDPI_SCTP_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_SCTP);
      break;
    case NDPI_OSPF_PROTOCOL_TYPE:
      return(NDPI_PROTOCOL_IP_OSPF)
break; case NDPI_IPIP_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_IP_IN_IP); break; case NDPI_ICMPV6_PROTOCOL_TYPE: return(NDPI_PROTOCOL_IP_ICMPV6); break; case 112: return(NDPI_PROTOCOL_IP_VRRP); break; } } return(NDPI_PROTOCOL_UNKNOWN); } /* ******************************************************************** */ u_int ndpi_get_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } /* ******************************************************************** */ #ifdef WIN32 char *strsep(char **sp, char *sep) { char *p, *s; if(sp == NULL || *sp == NULL || **sp == '\0') return(NULL); s = *sp; p = s + strcspn(s, sep); if(*p != '\0') *p++ = '\0'; *sp = p; return(s); } #endif /* ******************************************************************** */ int ndpi_handle_rule(struct ndpi_detection_module_struct *ndpi_str, char *rule, u_int8_t do_add) { char *at, *proto, *elem; ndpi_proto_defaults_t *def; u_int16_t subprotocol_id, i; at = strrchr(rule, '@'); if(at == NULL) { NDPI_LOG_ERR(ndpi_str, "Invalid rule '%s'\n", rule); return(-1); } else at[0] = 0, proto = &at[1]; for (i = 0; proto[i] != '\0'; i++) { switch (proto[i]) { case '/': case '&': case '^': case ':': case ';': case '\'': case '"': case ' ': proto[i] = '_'; break; } } for (i = 0, def = NULL; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) { if(ndpi_str->proto_defaults[i].protoName && strcasecmp(ndpi_str->proto_defaults[i].protoName, proto) == 0) { def = &ndpi_str->proto_defaults[i]; subprotocol_id = i; break; } } if(def == NULL) { if(!do_add) { /* We need to remove a rule */ NDPI_LOG_ERR(ndpi_str, "Unable to find protocol '%s': skipping rule '%s'\n", proto, rule); return(-3); } else { ndpi_port_range ports_a[MAX_DEFAULT_PORTS], ports_b[MAX_DEFAULT_PORTS]; u_int16_t no_master[2] = {NDPI_PROTOCOL_NO_MASTER_PROTO, NDPI_PROTOCOL_NO_MASTER_PROTO}; if(ndpi_str->ndpi_num_custom_protocols >= (NDPI_MAX_NUM_CUSTOM_PROTOCOLS - 1)) { NDPI_LOG_ERR(ndpi_str, 
"Too many protocols defined (%u): skipping protocol %s\n", ndpi_str->ndpi_num_custom_protocols, proto); return(-2); } ndpi_set_proto_defaults( ndpi_str, NDPI_PROTOCOL_ACCEPTABLE, ndpi_str->ndpi_num_supported_protocols, 0 /* can_have_a_subprotocol */, no_master, no_master, proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, /* TODO add protocol category support in rules */ ndpi_build_default_ports(ports_a, 0, 0, 0, 0, 0) /* TCP */, ndpi_build_default_ports(ports_b, 0, 0, 0, 0, 0) /* UDP */); def = &ndpi_str->proto_defaults[ndpi_str->ndpi_num_supported_protocols]; subprotocol_id = ndpi_str->ndpi_num_supported_protocols; ndpi_str->ndpi_num_supported_protocols++, ndpi_str->ndpi_num_custom_protocols++; } } while ((elem = strsep(&rule, ",")) != NULL) { char *attr = elem, *value = NULL; ndpi_port_range range; int is_tcp = 0, is_udp = 0, is_ip = 0; if(strncmp(attr, "tcp:", 4) == 0) is_tcp = 1, value = &attr[4]; else if(strncmp(attr, "udp:", 4) == 0) is_udp = 1, value = &attr[4]; else if(strncmp(attr, "ip:", 3) == 0) is_ip = 1, value = &attr[3]; else if(strncmp(attr, "host:", 5) == 0) { /* host:"<value>",host:"<value>",.....@<subproto> */ value = &attr[5]; if(value[0] == '"') value++; /* remove leading " */ if(value[strlen(value) - 1] == '"') value[strlen(value) - 1] = '\0'; /* remove trailing " */ } if(is_tcp || is_udp) { u_int p_low, p_high; if(sscanf(value, "%u-%u", &p_low, &p_high) == 2) range.port_low = p_low, range.port_high = p_high; else range.port_low = range.port_high = atoi(&elem[4]); if(do_add) addDefaultPort(ndpi_str, &range, def, 1 /* Custom user proto */, is_tcp ? &ndpi_str->tcpRoot : &ndpi_str->udpRoot, __FUNCTION__, __LINE__); else removeDefaultPort(&range, def, is_tcp ? 
&ndpi_str->tcpRoot : &ndpi_str->udpRoot); } else if(is_ip) { /* NDPI_PROTOCOL_TOR */ ndpi_add_host_ip_subprotocol(ndpi_str, value, subprotocol_id); } else { if(do_add) ndpi_add_host_url_subprotocol(ndpi_str, value, subprotocol_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_ACCEPTABLE); else ndpi_remove_host_url_subprotocol(ndpi_str, value, subprotocol_id); } } return(0); } /* ******************************************************************** */ /* * Format: * * <host|ip> <category_id> * * Notes: * - host and category are separated by a single TAB * - empty lines or lines starting with # are ignored */ int ndpi_load_categories_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { char buffer[512], *line, *name, *category, *saveptr; FILE *fd; int len, num = 0; fd = fopen(path, "r"); if(fd == NULL) { NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); return(-1); } while (1) { line = fgets(buffer, sizeof(buffer), fd); if(line == NULL) break; len = strlen(line); if((len <= 1) || (line[0] == '#')) continue; line[len - 1] = '\0'; name = strtok_r(line, "\t", &saveptr); if(name) { category = strtok_r(NULL, "\t", &saveptr); if(category) { int rc = ndpi_load_category(ndpi_str, name, (ndpi_protocol_category_t) atoi(category)); if(rc >= 0) num++; } } } fclose(fd); ndpi_enable_loaded_categories(ndpi_str); return(num); } /* ******************************************************************** */ /* Format: <tcp|udp>:<port>,<tcp|udp>:<port>,.....@<proto> Subprotocols Format: host:"<value>",host:"<value>",.....@<subproto> IP based Subprotocols Format (<value> is IP or CIDR): ip:<value>,ip:<value>,.....@<subproto> Example: tcp:80,tcp:3128@HTTP udp:139@NETBIOS */ int ndpi_load_protocols_file(struct ndpi_detection_module_struct *ndpi_str, const char *path) { FILE *fd; char *buffer, *old_buffer; int chunk_len = 512, buffer_len = chunk_len, old_buffer_len; int i, rc = -1; fd = fopen(path, "r"); if(fd == NULL) { 
NDPI_LOG_ERR(ndpi_str, "Unable to open file %s [%s]\n", path, strerror(errno)); goto error; } buffer = ndpi_malloc(buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); goto close_fd; } while (1) { char *line = buffer; int line_len = buffer_len; while ((line = fgets(line, line_len, fd)) != NULL && line[strlen(line) - 1] != '\n') { i = strlen(line); old_buffer = buffer; old_buffer_len = buffer_len; buffer_len += chunk_len; buffer = ndpi_realloc(old_buffer, old_buffer_len, buffer_len); if(buffer == NULL) { NDPI_LOG_ERR(ndpi_str, "Memory allocation failure\n"); ndpi_free(old_buffer); goto close_fd; } line = &buffer[i]; line_len = chunk_len; } if(!line) /* safety check */ break; i = strlen(buffer); if((i <= 1) || (buffer[0] == '#')) continue; else buffer[i - 1] = '\0'; ndpi_handle_rule(ndpi_str, buffer, 1); } rc = 0; ndpi_free(buffer); close_fd: fclose(fd); error: return(rc); } /* ******************************************************************** */ /* ntop */ void ndpi_set_bitmask_protocol_detection(char *label, struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *detection_bitmask, const u_int32_t idx, u_int16_t ndpi_protocol_id, void (*func)(struct ndpi_detection_module_struct *, struct ndpi_flow_struct *flow), const NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_bitmask, u_int8_t b_save_bitmask_unknow, u_int8_t b_add_detection_bitmask) { /* Compare specify protocol bitmask with main detection bitmask */ if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(*detection_bitmask, ndpi_protocol_id) != 0) { #ifdef DEBUG NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_set_bitmask_protocol_detection: %s : [callback_buffer] idx= %u, [proto_defaults] " "protocol_id=%u\n", label, idx, ndpi_protocol_id); #endif if(ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx != 0) { NDPI_LOG_DBG2(ndpi_str, "[NDPI] Internal error: protocol %s/%u has been already registered\n", label, ndpi_protocol_id); #ifdef DEBUG } else { NDPI_LOG_DBG2(ndpi_str, 
"[NDPI] Adding %s with protocol id %d\n", label, ndpi_protocol_id); #endif } /* Set function and index protocol within proto_default structure for port protocol detection and callback_buffer function for DPI protocol detection */ ndpi_str->proto_defaults[ndpi_protocol_id].protoIdx = idx; ndpi_str->proto_defaults[ndpi_protocol_id].func = ndpi_str->callback_buffer[idx].func = func; /* Set ndpi_selection_bitmask for protocol */ ndpi_str->callback_buffer[idx].ndpi_selection_bitmask = ndpi_selection_bitmask; /* Reset protocol detection bitmask via NDPI_PROTOCOL_UNKNOWN and than add specify protocol bitmast to callback buffer. */ if(b_save_bitmask_unknow) NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, NDPI_PROTOCOL_UNKNOWN); if(b_add_detection_bitmask) NDPI_ADD_PROTOCOL_TO_BITMASK(ndpi_str->callback_buffer[idx].detection_bitmask, ndpi_protocol_id); NDPI_SAVE_AS_BITMASK(ndpi_str->callback_buffer[idx].excluded_protocol_bitmask, ndpi_protocol_id); } } /* ******************************************************************** */ void ndpi_set_protocol_detection_bitmask2(struct ndpi_detection_module_struct *ndpi_str, const NDPI_PROTOCOL_BITMASK *dbm) { NDPI_PROTOCOL_BITMASK detection_bitmask_local; NDPI_PROTOCOL_BITMASK *detection_bitmask = &detection_bitmask_local; u_int32_t a = 0; NDPI_BITMASK_SET(detection_bitmask_local, *dbm); NDPI_BITMASK_SET(ndpi_str->detection_bitmask, *dbm); /* set this here to zero to be interrupt safe */ ndpi_str->callback_buffer_size = 0; /* HTTP */ init_http_dissector(ndpi_str, &a, detection_bitmask); /* STARCRAFT */ init_starcraft_dissector(ndpi_str, &a, detection_bitmask); /* TLS */ init_tls_dissector(ndpi_str, &a, detection_bitmask); /* STUN */ init_stun_dissector(ndpi_str, &a, detection_bitmask); /* RTP */ init_rtp_dissector(ndpi_str, &a, detection_bitmask); /* RTSP */ init_rtsp_dissector(ndpi_str, &a, detection_bitmask); /* RDP */ init_rdp_dissector(ndpi_str, &a, detection_bitmask); /* SIP */ init_sip_dissector(ndpi_str, 
&a, detection_bitmask); /* (remaining args of init_sip_dissector, split across source lines) */

  /* Registration continues: one init_*_dissector(ndpi_str, &a, detection_bitmask)
     call per protocol, each consuming the next callback-buffer slot. */

  /* IMO */
  init_imo_dissector(ndpi_str, &a, detection_bitmask);

  /* Teredo */
  init_teredo_dissector(ndpi_str, &a, detection_bitmask);

  /* EDONKEY */
  init_edonkey_dissector(ndpi_str, &a, detection_bitmask);

  /* FASTTRACK */
  init_fasttrack_dissector(ndpi_str, &a, detection_bitmask);

  /* GNUTELLA */
  init_gnutella_dissector(ndpi_str, &a, detection_bitmask);

  /* DIRECTCONNECT */
  init_directconnect_dissector(ndpi_str, &a, detection_bitmask);

  /* NATS */
  init_nats_dissector(ndpi_str, &a, detection_bitmask);

  /* APPLEJUICE */
  init_applejuice_dissector(ndpi_str, &a, detection_bitmask);

  /* SOULSEEK */
  init_soulseek_dissector(ndpi_str, &a, detection_bitmask);

  /* SOCKS */
  init_socks_dissector(ndpi_str, &a, detection_bitmask);

  /* IRC */
  init_irc_dissector(ndpi_str, &a, detection_bitmask);

  /* JABBER */
  init_jabber_dissector(ndpi_str, &a, detection_bitmask);

  /* MAIL_POP */
  init_mail_pop_dissector(ndpi_str, &a, detection_bitmask);

  /* MAIL_IMAP */
  init_mail_imap_dissector(ndpi_str, &a, detection_bitmask);

  /* MAIL_SMTP */
  init_mail_smtp_dissector(ndpi_str, &a, detection_bitmask);

  /* USENET */
  init_usenet_dissector(ndpi_str, &a, detection_bitmask);

  /* DNS */
  init_dns_dissector(ndpi_str, &a, detection_bitmask);

  /* FILETOPIA */
  init_fbzero_dissector(ndpi_str, &a, detection_bitmask);

  /* VMWARE */
  init_vmware_dissector(ndpi_str, &a, detection_bitmask);

  /* NON_TCP_UDP */
  init_non_tcp_udp_dissector(ndpi_str, &a, detection_bitmask);

  /* SOPCAST */
  init_sopcast_dissector(ndpi_str, &a, detection_bitmask);

  /* TVUPLAYER */
  init_tvuplayer_dissector(ndpi_str, &a, detection_bitmask);

  /* PPSTREAM */
  init_ppstream_dissector(ndpi_str, &a, detection_bitmask);

  /* PPLIVE */
  init_pplive_dissector(ndpi_str, &a, detection_bitmask);

  /* IAX */
  init_iax_dissector(ndpi_str, &a, detection_bitmask);

  /* MGPC */
  init_mgpc_dissector(ndpi_str, &a, detection_bitmask);

  /* ZATTOO */
  init_zattoo_dissector(ndpi_str, &a, detection_bitmask);

  /* QQ */
  init_qq_dissector(ndpi_str, &a, detection_bitmask);

  /* SSH */
  init_ssh_dissector(ndpi_str, &a, detection_bitmask);

  /* AYIYA */
  init_ayiya_dissector(ndpi_str, &a, detection_bitmask);

  /* THUNDER */
  init_thunder_dissector(ndpi_str, &a, detection_bitmask);

  /* VNC */
  init_vnc_dissector(ndpi_str, &a, detection_bitmask);

  /* TEAMVIEWER */
  init_teamviewer_dissector(ndpi_str, &a, detection_bitmask);

  /* DHCP */
  init_dhcp_dissector(ndpi_str, &a, detection_bitmask);

  /* STEAM */
  init_steam_dissector(ndpi_str, &a, detection_bitmask);

  /* HALFLIFE2 */
  init_halflife2_dissector(ndpi_str, &a, detection_bitmask);

  /* XBOX */
  init_xbox_dissector(ndpi_str, &a, detection_bitmask);

  /* HTTP_APPLICATION_ACTIVESYNC */
  init_http_activesync_dissector(ndpi_str, &a, detection_bitmask);

  /* SMB */
  init_smb_dissector(ndpi_str, &a, detection_bitmask);

  /* MINING */
  init_mining_dissector(ndpi_str, &a, detection_bitmask);

  /* TELNET */
  init_telnet_dissector(ndpi_str, &a, detection_bitmask);

  /* NTP */
  init_ntp_dissector(ndpi_str, &a, detection_bitmask);

  /* NFS */
  init_nfs_dissector(ndpi_str, &a, detection_bitmask);

  /* SSDP */
  init_ssdp_dissector(ndpi_str, &a, detection_bitmask);

  /* WORLD_OF_WARCRAFT */
  init_world_of_warcraft_dissector(ndpi_str, &a, detection_bitmask);

  /* POSTGRES */
  init_postgres_dissector(ndpi_str, &a, detection_bitmask);

  /* MYSQL */
  init_mysql_dissector(ndpi_str, &a, detection_bitmask);

  /* BGP */
  init_bgp_dissector(ndpi_str, &a, detection_bitmask);

  /* SNMP */
  init_snmp_dissector(ndpi_str, &a, detection_bitmask);

  /* KONTIKI */
  init_kontiki_dissector(ndpi_str, &a, detection_bitmask);

  /* ICECAST */
  init_icecast_dissector(ndpi_str, &a, detection_bitmask);

  /* SHOUTCAST */
  init_shoutcast_dissector(ndpi_str, &a, detection_bitmask);

  /* KERBEROS */
  init_kerberos_dissector(ndpi_str, &a, detection_bitmask);

  /* OPENFT */
  init_openft_dissector(ndpi_str, &a, detection_bitmask);

  /* SYSLOG */
  init_syslog_dissector(ndpi_str, &a, detection_bitmask);

  /* DIRECT_DOWNLOAD_LINK */
  init_directdownloadlink_dissector(ndpi_str, &a, detection_bitmask);

  /* NETBIOS */
  init_netbios_dissector(ndpi_str, &a, detection_bitmask);

  /* MDNS */
  init_mdns_dissector(ndpi_str, &a, detection_bitmask);

  /* IPP */
  init_ipp_dissector(ndpi_str, &a, detection_bitmask);

  /* LDAP */
  init_ldap_dissector(ndpi_str, &a, detection_bitmask);

  /* WARCRAFT3 */
  init_warcraft3_dissector(ndpi_str, &a, detection_bitmask);

  /* XDMCP */
  init_xdmcp_dissector(ndpi_str, &a, detection_bitmask);

  /* TFTP */
  init_tftp_dissector(ndpi_str, &a, detection_bitmask);

  /* MSSQL_TDS */
  init_mssql_tds_dissector(ndpi_str, &a, detection_bitmask);

  /* PPTP */
  init_pptp_dissector(ndpi_str, &a, detection_bitmask);

  /* STEALTHNET */
  init_stealthnet_dissector(ndpi_str, &a, detection_bitmask);

  /* DHCPV6 */
  init_dhcpv6_dissector(ndpi_str, &a, detection_bitmask);

  /* AFP */
  init_afp_dissector(ndpi_str, &a, detection_bitmask);

  /* check_mk */
  init_checkmk_dissector(ndpi_str, &a, detection_bitmask);

  /* AIMINI */
  init_aimini_dissector(ndpi_str, &a, detection_bitmask);

  /* FLORENSIA */
  init_florensia_dissector(ndpi_str, &a, detection_bitmask);

  /* MAPLESTORY */
  init_maplestory_dissector(ndpi_str, &a, detection_bitmask);

  /* DOFUS */
  init_dofus_dissector(ndpi_str, &a, detection_bitmask);

  /* WORLD_OF_KUNG_FU */
  init_world_of_kung_fu_dissector(ndpi_str, &a, detection_bitmask);

  /* FIESTA */
  init_fiesta_dissector(ndpi_str, &a, detection_bitmask);

  /* CROSSIFIRE */
  init_crossfire_dissector(ndpi_str, &a, detection_bitmask);

  /* GUILDWARS */
  init_guildwars_dissector(ndpi_str, &a, detection_bitmask);

  /* ARMAGETRON */
  init_armagetron_dissector(ndpi_str, &a, detection_bitmask);

  /* DROPBOX */
  init_dropbox_dissector(ndpi_str, &a, detection_bitmask);

  /* SPOTIFY */
  init_spotify_dissector(ndpi_str, &a, detection_bitmask);

  /* RADIUS */
  init_radius_dissector(ndpi_str, &a, detection_bitmask);

  /* CITRIX */
  init_citrix_dissector(ndpi_str, &a, detection_bitmask);

  /* LOTUS_NOTES */
  init_lotus_notes_dissector(ndpi_str, &a, detection_bitmask);

  /* GTP */
  init_gtp_dissector(ndpi_str, &a, detection_bitmask);

  /* DCERPC */
  init_dcerpc_dissector(ndpi_str, &a, detection_bitmask);

  /* NETFLOW */
  init_netflow_dissector(ndpi_str, &a, detection_bitmask);

  /* SFLOW */
  init_sflow_dissector(ndpi_str, &a, detection_bitmask);

  /* H323 */
  init_h323_dissector(ndpi_str, &a, detection_bitmask);

  /* OPENVPN */
  init_openvpn_dissector(ndpi_str, &a, detection_bitmask);

  /* NOE */
  init_noe_dissector(ndpi_str, &a, detection_bitmask);

  /* CISCOVPN */
  init_ciscovpn_dissector(ndpi_str, &a, detection_bitmask);

  /* TEAMSPEAK */
  init_teamspeak_dissector(ndpi_str, &a, detection_bitmask);

  /* TOR */
  init_tor_dissector(ndpi_str, &a, detection_bitmask);

  /* SKINNY */
  init_skinny_dissector(ndpi_str, &a, detection_bitmask);

  /* RTCP */
  init_rtcp_dissector(ndpi_str, &a, detection_bitmask);

  /* RSYNC */
  init_rsync_dissector(ndpi_str, &a, detection_bitmask);

  /* WHOIS_DAS */
  init_whois_das_dissector(ndpi_str, &a, detection_bitmask);

  /* ORACLE */
  init_oracle_dissector(ndpi_str, &a, detection_bitmask);

  /* CORBA */
  init_corba_dissector(ndpi_str, &a, detection_bitmask);

  /* RTMP */
  init_rtmp_dissector(ndpi_str, &a, detection_bitmask);

  /* FTP_CONTROL */
  init_ftp_control_dissector(ndpi_str, &a, detection_bitmask);

  /* FTP_DATA */
  init_ftp_data_dissector(ndpi_str, &a, detection_bitmask);

  /* PANDO */
  init_pando_dissector(ndpi_str, &a, detection_bitmask);

  /* MEGACO */
  init_megaco_dissector(ndpi_str, &a, detection_bitmask);

  /* REDIS */
  init_redis_dissector(ndpi_str, &a, detection_bitmask);

  /* UPnP */
  init_upnp_dissector(ndpi_str, &a, detection_bitmask);

  /* VHUA */
  init_vhua_dissector(ndpi_str, &a, detection_bitmask);

  /* ZMQ */
  init_zmq_dissector(ndpi_str, &a, detection_bitmask);

  /* TELEGRAM */
  init_telegram_dissector(ndpi_str, &a, detection_bitmask);

  /* QUIC */
  init_quic_dissector(ndpi_str, &a, detection_bitmask);

  /* DIAMETER */
  init_diameter_dissector(ndpi_str, &a, detection_bitmask);

  /* APPLE_PUSH */
  init_apple_push_dissector(ndpi_str, &a, detection_bitmask);

  /* EAQ */
  init_eaq_dissector(ndpi_str, &a, detection_bitmask);

  /* KAKAOTALK_VOICE */
  init_kakaotalk_voice_dissector(ndpi_str, &a, detection_bitmask);

  /* MPEGTS */
  init_mpegts_dissector(ndpi_str, &a, detection_bitmask);

  /* UBNTAC2 */
  init_ubntac2_dissector(ndpi_str, &a, detection_bitmask);

  /* COAP */
  init_coap_dissector(ndpi_str, &a, detection_bitmask);

  /* MQTT */
  init_mqtt_dissector(ndpi_str, &a, detection_bitmask);

  /* SOME/IP */
  init_someip_dissector(ndpi_str, &a, detection_bitmask);

  /* RX */
  init_rx_dissector(ndpi_str, &a, detection_bitmask);

  /* GIT */
  init_git_dissector(ndpi_str, &a, detection_bitmask);

  /* HANGOUT */
  init_hangout_dissector(ndpi_str, &a, detection_bitmask);

  /* DRDA */
  init_drda_dissector(ndpi_str, &a, detection_bitmask);

  /* BJNP */
  init_bjnp_dissector(ndpi_str, &a, detection_bitmask);

  /* SMPP */
  init_smpp_dissector(ndpi_str, &a, detection_bitmask);

  /* TINC */
  init_tinc_dissector(ndpi_str, &a, detection_bitmask);

  /* FIX */
  init_fix_dissector(ndpi_str, &a, detection_bitmask);

  /* NINTENDO */
  init_nintendo_dissector(ndpi_str, &a, detection_bitmask);

  /* MODBUS */
  init_modbus_dissector(ndpi_str, &a, detection_bitmask);

  /* CAPWAP */
  init_capwap_dissector(ndpi_str, &a, detection_bitmask);

  /* ZABBIX */
  init_zabbix_dissector(ndpi_str, &a, detection_bitmask);

  /*** Put false-positive sensitive protocols at the end ***/

  /* VIBER */
  init_viber_dissector(ndpi_str, &a, detection_bitmask);

  /* SKYPE */
  init_skype_dissector(ndpi_str, &a, detection_bitmask);

  /* BITTORRENT */
  init_bittorrent_dissector(ndpi_str, &a, detection_bitmask);

  /* WHATSAPP */
  init_whatsapp_dissector(ndpi_str, &a, detection_bitmask);

  /* OOKLA */
  init_ookla_dissector(ndpi_str, &a, detection_bitmask);

  /* AMQP */
  init_amqp_dissector(ndpi_str, &a, detection_bitmask);

  /* CSGO */
  init_csgo_dissector(ndpi_str, &a, detection_bitmask);

  /* LISP */
  init_lisp_dissector(ndpi_str, &a, detection_bitmask);

  /* AJP */
  init_ajp_dissector(ndpi_str, &a, detection_bitmask);

  /* Memcached */
  init_memcached_dissector(ndpi_str, &a, detection_bitmask);

  /* Nest Log Sink */
  init_nest_log_sink_dissector(ndpi_str, &a, detection_bitmask);

  /* WireGuard VPN */
  init_wireguard_dissector(ndpi_str, &a, detection_bitmask);

  /* Amazon_Video */
  init_amazon_video_dissector(ndpi_str, &a, detection_bitmask);

  /* Targus Getdata */
  init_targus_getdata_dissector(ndpi_str, &a, detection_bitmask);

  /* S7 comm */
  init_s7comm_dissector(ndpi_str, &a, detection_bitmask);

  /* IEC 60870-5-104 */
  init_104_dissector(ndpi_str, &a, detection_bitmask);

  /* WEBSOCKET */
  init_websocket_dissector(ndpi_str, &a, detection_bitmask);

#ifdef CUSTOM_NDPI_PROTOCOLS
#include "../../../nDPI-custom/custom_ndpi_main_init.c"
#endif

  /* ----------------------------------------------------------------- */

  /* `a` now holds the number of registered dissector callbacks */
  ndpi_str->callback_buffer_size = a;

  NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size is %u\n", ndpi_str->callback_buffer_size);

  /* now build the specific buffer for tcp, udp and non_tcp_udp */
  ndpi_str->callback_buffer_size_tcp_payload = 0;
  ndpi_str->callback_buffer_size_tcp_no_payload = 0;

  /* TCP sub-buffer: callbacks selecting TCP, TCP-or-UDP, or complete traffic */
  for (a = 0; a < ndpi_str->callback_buffer_size; a++) {
    if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask &
	(NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP |
	 NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) {
      if(_ndpi_debug_callbacks)
	NDPI_LOG_DBG2(ndpi_str, "callback_buffer_tcp_payload, adding buffer %u as entry %u\n", a,
		      ndpi_str->callback_buffer_size_tcp_payload);

      memcpy(&ndpi_str->callback_buffer_tcp_payload[ndpi_str->callback_buffer_size_tcp_payload],
	     &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct));
      ndpi_str->callback_buffer_size_tcp_payload++;

      /* callbacks not requiring payload also go into the no-payload list */
      if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0) {
	if(_ndpi_debug_callbacks)
	  NDPI_LOG_DBG2(
	    ndpi_str, "\tcallback_buffer_tcp_no_payload, additional adding buffer %u to no_payload process\n", a);

	memcpy(&ndpi_str->callback_buffer_tcp_no_payload[ndpi_str->callback_buffer_size_tcp_no_payload],
	       &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct));
	ndpi_str->callback_buffer_size_tcp_no_payload++;
      }
    }
  }

  ndpi_str->callback_buffer_size_udp = 0;

  /* UDP sub-buffer: callbacks selecting UDP, TCP-or-UDP, or complete traffic */
  for (a = 0; a < ndpi_str->callback_buffer_size; a++) {
    if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask &
	(NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP |
	 NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC)) != 0) {
      if(_ndpi_debug_callbacks)
	NDPI_LOG_DBG2(ndpi_str, "callback_buffer_size_udp: adding buffer : %u as entry %u\n", a,
		      ndpi_str->callback_buffer_size_udp);

      memcpy(&ndpi_str->callback_buffer_udp[ndpi_str->callback_buffer_size_udp], &ndpi_str->callback_buffer[a],
	     sizeof(struct ndpi_call_function_struct));
      ndpi_str->callback_buffer_size_udp++;
    }
  }

  ndpi_str->callback_buffer_size_non_tcp_udp = 0;

  /* non-TCP/UDP sub-buffer: callbacks with no TCP/UDP selection bits, or
     marked as complete-traffic */
  for (a = 0; a < ndpi_str->callback_buffer_size; a++) {
    if((ndpi_str->callback_buffer[a].ndpi_selection_bitmask &
	(NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP |
	 NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP)) == 0 ||
       (ndpi_str->callback_buffer[a].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC) != 0) {
      if(_ndpi_debug_callbacks)
	NDPI_LOG_DBG2(ndpi_str, "callback_buffer_non_tcp_udp: adding buffer : %u as entry %u\n", a,
		      ndpi_str->callback_buffer_size_non_tcp_udp);

      memcpy(&ndpi_str->callback_buffer_non_tcp_udp[ndpi_str->callback_buffer_size_non_tcp_udp],
	     &ndpi_str->callback_buffer[a], sizeof(struct ndpi_call_function_struct));
      ndpi_str->callback_buffer_size_non_tcp_udp++;
    }
  }
}

#ifdef NDPI_DETECTION_SUPPORT_IPV6
/* handle extension headers in IPv6 packets
 * arguments:
 *     l4ptr: pointer to the byte following the initial IPv6 header
 *     l4len: the length of the IPv6 packet excluding the IPv6 header
 *     nxt_hdr: next header value from the IPv6 header
 * result:
 *     l4ptr: pointer to the start of the actual packet payload
 *     l4len: length of the actual payload
 *     nxt_hdr: protocol of the actual payload
 * returns 0 upon success and 1 upon
failure */ int ndpi_handle_ipv6_extension_headers(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t **l4ptr, u_int16_t *l4len, u_int8_t *nxt_hdr) { while ((*nxt_hdr == 0 || *nxt_hdr == 43 || *nxt_hdr == 44 || *nxt_hdr == 60 || *nxt_hdr == 135 || *nxt_hdr == 59)) { u_int16_t ehdr_len; // no next header if(*nxt_hdr == 59) { return(1); } // fragment extension header has fixed size of 8 bytes and the first byte is the next header type if(*nxt_hdr == 44) { if(*l4len < 8) { return(1); } *nxt_hdr = (*l4ptr)[0]; *l4len -= 8; (*l4ptr) += 8; continue; } // the other extension headers have one byte for the next header type // and one byte for the extension header length in 8 byte steps minus the first 8 bytes if(*l4len < 2) { return(1); } ehdr_len = (*l4ptr)[1]; ehdr_len *= 8; ehdr_len += 8; if(*l4len < ehdr_len) { return(1); } *nxt_hdr = (*l4ptr)[0]; *l4len -= ehdr_len; (*l4ptr) += ehdr_len; } return(0); } #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ static u_int8_t ndpi_iph_is_valid_and_not_fragmented(const struct ndpi_iphdr *iph, const u_int16_t ipsize) { //#ifdef REQUIRE_FULL_PACKETS if(ipsize < iph->ihl * 4 || ipsize < ntohs(iph->tot_len) || ntohs(iph->tot_len) < iph->ihl * 4 || (iph->frag_off & htons(0x1FFF)) != 0) { return(0); } //#endif return(1); } static u_int8_t ndpi_detection_get_l4_internal(struct ndpi_detection_module_struct *ndpi_str, const u_int8_t *l3, u_int16_t l3_len, const u_int8_t **l4_return, u_int16_t *l4_len_return, u_int8_t *l4_protocol_return, u_int32_t flags) { const struct ndpi_iphdr *iph = NULL; #ifdef NDPI_DETECTION_SUPPORT_IPV6 const struct ndpi_ipv6hdr *iph_v6 = NULL; #endif u_int16_t l4len = 0; const u_int8_t *l4ptr = NULL; u_int8_t l4protocol = 0; if(l3 == NULL || l3_len < sizeof(struct ndpi_iphdr)) return(1); if((iph = (const struct ndpi_iphdr *) l3) == NULL) return(1); if(iph->version == IPVERSION && iph->ihl >= 5) { NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n"); } #ifdef NDPI_DETECTION_SUPPORT_IPV6 else if(iph->version == 6 && l3_len >= 
sizeof(struct ndpi_ipv6hdr)) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n");
    iph_v6 = (const struct ndpi_ipv6hdr *) l3;
    iph = NULL;   /* exactly one of iph / iph_v6 is non-NULL from here on */
  }
#endif
  else {
    return(1);
  }

  if((flags & NDPI_DETECTION_ONLY_IPV6) && iph != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header found but excluded by flag\n");
    return(1);
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if((flags & NDPI_DETECTION_ONLY_IPV4) && iph_v6 != NULL) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header found but excluded by flag\n");
    return(1);
  }
#endif

  if(iph != NULL && ndpi_iph_is_valid_and_not_fragmented(iph, l3_len)) {
    u_int16_t len = ntohs(iph->tot_len);
    u_int16_t hlen = (iph->ihl * 4);

    l4ptr = (((const u_int8_t *) iph) + iph->ihl * 4);

    /* tot_len == 0 can happen e.g. with TCP segmentation offload */
    if(len == 0)
      len = l3_len;

    l4len = (len > hlen) ? (len - hlen) : 0;
    l4protocol = iph->protocol;
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(iph_v6 != NULL && (l3_len - sizeof(struct ndpi_ipv6hdr)) >= ntohs(iph_v6->ip6_hdr.ip6_un1_plen)) {
    l4ptr = (((const u_int8_t *) iph_v6) + sizeof(struct ndpi_ipv6hdr));
    l4len = ntohs(iph_v6->ip6_hdr.ip6_un1_plen);
    l4protocol = iph_v6->ip6_hdr.ip6_un1_nxt;

    // we need to handle IPv6 extension headers if present
    if(ndpi_handle_ipv6_extension_headers(ndpi_str, &l4ptr, &l4len, &l4protocol) != 0) {
      return(1);
    }
  }
#endif
  else {
    return(1);
  }

  if(l4_return != NULL) {
    *l4_return = l4ptr;
  }
  if(l4_len_return != NULL) {
    *l4_len_return = l4len;
  }
  if(l4_protocol_return != NULL) {
    *l4_protocol_return = l4protocol;
  }

  return(0);
}

/* ************************************************ */

/* Copy the flow's detected protocol stack into the per-packet structure so
   dissectors can read it from the packet alone. */
void ndpi_apply_flow_protocol_to_packet(struct ndpi_flow_struct *flow, struct ndpi_packet_struct *packet) {
  memcpy(&packet->detected_protocol_stack, &flow->detected_protocol_stack, sizeof(packet->detected_protocol_stack));
  memcpy(&packet->protocol_stack_info, &flow->protocol_stack_info, sizeof(packet->protocol_stack_info));
}

/* ************************************************ */

/*
 * Prepare flow->packet for dissection: locate the L4 header, set the
 * tcp/udp/generic pointers and payload boundaries. Returns 0 on success,
 * 1 on malformed/unsupported packets. A fresh TCP SYN on a still-unknown
 * flow resets the detection state (freeing per-flow heap buffers first to
 * avoid leaking them across the memset).
 */
static int ndpi_init_packet_header(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
				   unsigned
short packetlen) {
  const struct ndpi_iphdr *decaps_iph = NULL;
  u_int16_t l3len;
  u_int16_t l4len;
  const u_int8_t *l4ptr;
  u_int8_t l4protocol;
  u_int8_t l4_result;

  if(!flow)
    return(1);

  /* reset payload_packet_len, will be set if ipv4 tcp or udp */
  flow->packet.payload_packet_len = 0;
  flow->packet.l4_packet_len = 0;
  flow->packet.l3_packet_len = packetlen;

  flow->packet.tcp = NULL, flow->packet.udp = NULL;
  flow->packet.generic_l4_ptr = NULL;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  flow->packet.iphv6 = NULL;
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

  ndpi_apply_flow_protocol_to_packet(flow, &flow->packet);

  l3len = flow->packet.l3_packet_len;

#ifdef NDPI_DETECTION_SUPPORT_IPV6
  if(flow->packet.iph != NULL) {
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */
    decaps_iph = flow->packet.iph;
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  }
#endif /* NDPI_DETECTION_SUPPORT_IPV6 */

  if(decaps_iph && decaps_iph->version == IPVERSION && decaps_iph->ihl >= 5) {
    NDPI_LOG_DBG2(ndpi_str, "ipv4 header\n");
  }
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  else if(decaps_iph && decaps_iph->version == 6 && l3len >= sizeof(struct ndpi_ipv6hdr) &&
	  (ndpi_str->ip_version_limit & NDPI_DETECTION_ONLY_IPV4) == 0) {
    NDPI_LOG_DBG2(ndpi_str, "ipv6 header\n");
    /* flow->packet.iph actually holds an IPv6 header here: move it over */
    flow->packet.iphv6 = (struct ndpi_ipv6hdr *) flow->packet.iph;
    flow->packet.iph = NULL;
  }
#endif
  else {
    flow->packet.iph = NULL;
    return(1);
  }

  /* needed:
   *  - unfragmented packets
   *  - ip header <= packet len
   *  - ip total length >= packet len
   */

  l4ptr = NULL;
  l4len = 0;
  l4protocol = 0;

  l4_result =
    ndpi_detection_get_l4_internal(ndpi_str, (const u_int8_t *) decaps_iph, l3len, &l4ptr, &l4len, &l4protocol, 0);

  if(l4_result != 0) {
    return(1);
  }

  flow->packet.l4_protocol = l4protocol;
  flow->packet.l4_packet_len = l4len;
  flow->l4_proto = l4protocol;

  /* tcp / udp detection */
  if(l4protocol == IPPROTO_TCP && flow->packet.l4_packet_len >= 20 /* min size of tcp */) {
    /* tcp */
    flow->packet.tcp = (struct ndpi_tcphdr *) l4ptr;

    if(flow->packet.l4_packet_len >= flow->packet.tcp->doff * 4) {
      flow->packet.payload_packet_len = flow->packet.l4_packet_len - flow->packet.tcp->doff * 4;
      flow->packet.actual_payload_len = flow->packet.payload_packet_len;
      flow->packet.payload = ((u_int8_t *) flow->packet.tcp) + (flow->packet.tcp->doff * 4);

      /* check for new tcp syn packets, here
       * idea: reset detection state if a connection is unknown
       */
      if(flow->packet.tcp->syn != 0 && flow->packet.tcp->ack == 0 && flow->init_finished != 0 &&
	 flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
	u_int8_t backup;
	u_int16_t backup1, backup2;

	/* free every heap buffer owned by the flow BEFORE zeroing it,
	   otherwise the pointers (and the memory) would be leaked */
	if(flow->http.url) {
	  ndpi_free(flow->http.url);
	  flow->http.url = NULL;
	}
	if(flow->http.content_type) {
	  ndpi_free(flow->http.content_type);
	  flow->http.content_type = NULL;
	}
	if(flow->http.user_agent) {
	  ndpi_free(flow->http.user_agent);
	  flow->http.user_agent = NULL;
	}
	if(flow->kerberos_buf.pktbuf) {
	  ndpi_free(flow->kerberos_buf.pktbuf);
	  flow->kerberos_buf.pktbuf = NULL;
	}
	if(flow->l4.tcp.tls.message.buffer) {
	  ndpi_free(flow->l4.tcp.tls.message.buffer);
	  flow->l4.tcp.tls.message.buffer = NULL;
	  flow->l4.tcp.tls.message.buffer_len = flow->l4.tcp.tls.message.buffer_used = 0;
	}

	/* preserve counters/guesses across the reset */
	backup = flow->num_processed_pkts;
	backup1 = flow->guessed_protocol_id;
	backup2 = flow->guessed_host_protocol_id;
	memset(flow, 0, sizeof(*(flow)));
	flow->num_processed_pkts = backup;
	flow->guessed_protocol_id = backup1;
	flow->guessed_host_protocol_id = backup2;

	NDPI_LOG_DBG(ndpi_str, "tcp syn packet for unknown protocol, reset detection state\n");
      }
    } else {
      /* tcp header not complete */
      flow->packet.tcp = NULL;
    }
  } else if(l4protocol == IPPROTO_UDP && flow->packet.l4_packet_len >= 8 /* size of udp */) {
    flow->packet.udp = (struct ndpi_udphdr *) l4ptr;
    flow->packet.payload_packet_len = flow->packet.l4_packet_len - 8;
    flow->packet.payload = ((u_int8_t *) flow->packet.udp) + 8;
  } else {
    flow->packet.generic_l4_ptr = l4ptr;
  }
  return(0);
}

/* ************************************************ */

void ndpi_connection_tracking(struct ndpi_detection_module_struct *ndpi_str,
struct ndpi_flow_struct *flow) { if(!flow) { return; } else { /* const for gcc code optimization and cleaner code */ struct ndpi_packet_struct *packet = &flow->packet; const struct ndpi_iphdr *iph = packet->iph; #ifdef NDPI_DETECTION_SUPPORT_IPV6 const struct ndpi_ipv6hdr *iphv6 = packet->iphv6; #endif const struct ndpi_tcphdr *tcph = packet->tcp; const struct ndpi_udphdr *udph = flow->packet.udp; packet->tcp_retransmission = 0, packet->packet_direction = 0; if(ndpi_str->direction_detect_disable) { packet->packet_direction = flow->packet_direction; } else { if(iph != NULL && ntohl(iph->saddr) < ntohl(iph->daddr)) packet->packet_direction = 1; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(iphv6 != NULL && NDPI_COMPARE_IPV6_ADDRESS_STRUCTS(&iphv6->ip6_src, &iphv6->ip6_dst) != 0) packet->packet_direction = 1; #endif } packet->packet_lines_parsed_complete = 0; if(flow->init_finished == 0) { flow->init_finished = 1; flow->setup_packet_direction = packet->packet_direction; } if(tcph != NULL) { /* reset retried bytes here before setting it */ packet->num_retried_bytes = 0; if(!ndpi_str->direction_detect_disable) packet->packet_direction = (ntohs(tcph->source) < ntohs(tcph->dest)) ? 1 : 0; if(tcph->syn != 0 && tcph->ack == 0 && flow->l4.tcp.seen_syn == 0 && flow->l4.tcp.seen_syn_ack == 0 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_syn = 1; } if(tcph->syn != 0 && tcph->ack != 0 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 0 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_syn_ack = 1; } if(tcph->syn == 0 && tcph->ack == 1 && flow->l4.tcp.seen_syn == 1 && flow->l4.tcp.seen_syn_ack == 1 && flow->l4.tcp.seen_ack == 0) { flow->l4.tcp.seen_ack = 1; } if((flow->next_tcp_seq_nr[0] == 0 && flow->next_tcp_seq_nr[1] == 0) || (flow->next_tcp_seq_nr[0] == 0 || flow->next_tcp_seq_nr[1] == 0)) { /* initialize tcp sequence counters */ /* the ack flag needs to be set to get valid sequence numbers from the other * direction. 
Usually it will catch the second packet syn+ack but it works * also for asymmetric traffic where it will use the first data packet * * if the syn flag is set add one to the sequence number, * otherwise use the payload length. */ if(tcph->ack != 0) { flow->next_tcp_seq_nr[flow->packet.packet_direction] = ntohl(tcph->seq) + (tcph->syn ? 1 : packet->payload_packet_len); flow->next_tcp_seq_nr[1 - flow->packet.packet_direction] = ntohl(tcph->ack_seq); } } else if(packet->payload_packet_len > 0) { /* check tcp sequence counters */ if(((u_int32_t)(ntohl(tcph->seq) - flow->next_tcp_seq_nr[packet->packet_direction])) > ndpi_str->tcp_max_retransmission_window_size) { packet->tcp_retransmission = 1; /* CHECK IF PARTIAL RETRY IS HAPPENING */ if((flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq) < packet->payload_packet_len)) { /* num_retried_bytes actual_payload_len hold info about the partial retry analyzer which require this info can make use of this info Other analyzer can use packet->payload_packet_len */ packet->num_retried_bytes = (u_int16_t)(flow->next_tcp_seq_nr[packet->packet_direction] - ntohl(tcph->seq)); packet->actual_payload_len = packet->payload_packet_len - packet->num_retried_bytes; flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len; } } /* normal path actual_payload_len is initialized to payload_packet_len during tcp header parsing itself. It will be changed only in case of retransmission */ else { packet->num_retried_bytes = 0; flow->next_tcp_seq_nr[packet->packet_direction] = ntohl(tcph->seq) + packet->payload_packet_len; } } if(tcph->rst) { flow->next_tcp_seq_nr[0] = 0; flow->next_tcp_seq_nr[1] = 0; } } else if(udph != NULL) { if(!ndpi_str->direction_detect_disable) packet->packet_direction = (htons(udph->source) < htons(udph->dest)) ? 
1 : 0; } if(flow->packet_counter < MAX_PACKET_COUNTER && packet->payload_packet_len) { flow->packet_counter++; } if(flow->packet_direction_counter[packet->packet_direction] < MAX_PACKET_COUNTER && packet->payload_packet_len) { flow->packet_direction_counter[packet->packet_direction]++; } if(flow->byte_counter[packet->packet_direction] + packet->payload_packet_len > flow->byte_counter[packet->packet_direction]) { flow->byte_counter[packet->packet_direction] += packet->payload_packet_len; } } } /* ************************************************ */ void check_ndpi_other_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) { if(!flow) return; void *func = NULL; u_int32_t a; u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx; int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId; NDPI_PROTOCOL_BITMASK detection_bitmask; NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]); if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } for (a = 0; a < ndpi_str->callback_buffer_size_non_tcp_udp; a++) { if((func != ndpi_str->callback_buffer_non_tcp_udp[a].func) && (ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) == 
ndpi_str->callback_buffer_non_tcp_udp[a].ndpi_selection_bitmask &&
       NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                            ndpi_str->callback_buffer_non_tcp_udp[a].excluded_protocol_bitmask) == 0 &&
       NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_non_tcp_udp[a].detection_bitmask, detection_bitmask) != 0) {
      if(ndpi_str->callback_buffer_non_tcp_udp[a].func != NULL)
        ndpi_str->callback_buffer_non_tcp_udp[a].func(ndpi_str, flow);

      if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
        break; /* Stop after detecting the first protocol */
    }
  }
}

/* ************************************************ */

/* Run the registered dissectors for UDP flows: first the callback matching
 * the guessed protocol (if eligible), then every UDP callback until one of
 * them sets a detected protocol. */
void check_ndpi_udp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  void *func = NULL;
  u_int32_t a;
  u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx;
  int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId;
  NDPI_PROTOCOL_BITMASK detection_bitmask;

  NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]);

  /* Try the guessed protocol's own dissector first (remembered in 'func'
     so the loop below does not call it twice). */
  if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
     NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                          ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
     NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
     (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
       ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
    if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
       (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL))
      ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow),
        func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func;
  }

  if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
    for (a = 0; a < ndpi_str->callback_buffer_size_udp; a++) {
      if((func != ndpi_str->callback_buffer_udp[a].func) &&
         (ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask & *ndpi_selection_packet) ==
           ndpi_str->callback_buffer_udp[a].ndpi_selection_bitmask &&
         NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                              ndpi_str->callback_buffer_udp[a].excluded_protocol_bitmask) == 0 &&
         NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_udp[a].detection_bitmask, detection_bitmask) != 0) {
        ndpi_str->callback_buffer_udp[a].func(ndpi_str, flow);

        // NDPI_LOG_DBG(ndpi_str, "[UDP,CALL] dissector of protocol as callback_buffer idx = %d\n",a);
        if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
          break; /* Stop after detecting the first protocol */
      } else if(_ndpi_debug_callbacks)
        NDPI_LOG_DBG2(ndpi_str, "[UDP,SKIP] dissector of protocol as callback_buffer idx = %d\n", a);
    }
  }
}

/* ************************************************ */

/* Run the registered dissectors for TCP flows. Packets carrying payload go
 * through the tcp_payload callback list, empty packets through the
 * tcp_no_payload list; in both cases the guessed protocol's own dissector
 * is tried first. */
void check_ndpi_tcp_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  void *func = NULL;
  u_int32_t a;
  u_int16_t proto_index = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoIdx;
  int16_t proto_id = ndpi_str->proto_defaults[flow->guessed_protocol_id].protoId;
  NDPI_PROTOCOL_BITMASK detection_bitmask;

  NDPI_SAVE_AS_BITMASK(detection_bitmask, flow->packet.detected_protocol_stack[0]);

  if(flow->packet.payload_packet_len != 0) {
    if((proto_id != NDPI_PROTOCOL_UNKNOWN) &&
       NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                            ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 &&
       NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 &&
       (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) ==
         ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) {
      if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
         (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL))
        ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func =
ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) { for (a = 0; a < ndpi_str->callback_buffer_size_tcp_payload; a++) { if((func != ndpi_str->callback_buffer_tcp_payload[a].func) && (ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer_tcp_payload[a].ndpi_selection_bitmask && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer_tcp_payload[a].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_payload[a].detection_bitmask, detection_bitmask) != 0) { ndpi_str->callback_buffer_tcp_payload[a].func(ndpi_str, flow); if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) break; /* Stop after detecting the first protocol */ } } } } else { /* no payload */ if((proto_id != NDPI_PROTOCOL_UNKNOWN) && NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask, ndpi_str->callback_buffer[proto_index].excluded_protocol_bitmask) == 0 && NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer[proto_index].detection_bitmask, detection_bitmask) != 0 && (ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer[proto_index].ndpi_selection_bitmask) { if((flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (ndpi_str->proto_defaults[flow->guessed_protocol_id].func != NULL) && ((ndpi_str->callback_buffer[flow->guessed_protocol_id].ndpi_selection_bitmask & NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD) == 0)) ndpi_str->proto_defaults[flow->guessed_protocol_id].func(ndpi_str, flow), func = ndpi_str->proto_defaults[flow->guessed_protocol_id].func; } for (a = 0; a < ndpi_str->callback_buffer_size_tcp_no_payload; a++) { if((func != ndpi_str->callback_buffer_tcp_payload[a].func) && (ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask & *ndpi_selection_packet) == ndpi_str->callback_buffer_tcp_no_payload[a].ndpi_selection_bitmask && 
NDPI_BITMASK_COMPARE(flow->excluded_protocol_bitmask,
                              ndpi_str->callback_buffer_tcp_no_payload[a].excluded_protocol_bitmask) == 0 &&
         NDPI_BITMASK_COMPARE(ndpi_str->callback_buffer_tcp_no_payload[a].detection_bitmask,
                              detection_bitmask) != 0) {
        ndpi_str->callback_buffer_tcp_no_payload[a].func(ndpi_str, flow);

        if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN)
          break; /* Stop after detecting the first protocol */
      }
    }
  }
}

/* ********************************************************************************* */

/* Dispatch the packet to the dissector list matching its transport:
 * TCP, UDP, or everything else. */
void ndpi_check_flow_func(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                          NDPI_SELECTION_BITMASK_PROTOCOL_SIZE *ndpi_selection_packet) {
  if(flow->packet.tcp != NULL)
    check_ndpi_tcp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else if(flow->packet.udp != NULL)
    check_ndpi_udp_flow_func(ndpi_str, flow, ndpi_selection_packet);
  else
    check_ndpi_other_flow_func(ndpi_str, flow, ndpi_selection_packet);
}

/* ********************************************************************************* */

/* Guess the application protocol from the IPv4 addresses/ports alone using
 * the network/port patricia tree: source address is tried first, then the
 * destination. Returns NDPI_PROTOCOL_UNKNOWN for non-IPv4 or no match. */
u_int16_t ndpi_guess_host_protocol_id(struct ndpi_detection_module_struct *ndpi_str,
                                      struct ndpi_flow_struct *flow) {
  u_int16_t ret = NDPI_PROTOCOL_UNKNOWN;

  if(flow->packet.iph) {
    struct in_addr addr;
    u_int16_t sport, dport;

    addr.s_addr = flow->packet.iph->saddr;

    /* Ports stay in network byte order here, matching what
       ndpi_network_port_ptree_match() expects. */
    if((flow->l4_proto == IPPROTO_TCP) && flow->packet.tcp)
      sport = flow->packet.tcp->source, dport = flow->packet.tcp->dest;
    else if((flow->l4_proto == IPPROTO_UDP) && flow->packet.udp)
      sport = flow->packet.udp->source, dport = flow->packet.udp->dest;
    else
      sport = dport = 0;

    /* guess host protocol */
    ret = ndpi_network_port_ptree_match(ndpi_str, &addr, sport);

    if(ret == NDPI_PROTOCOL_UNKNOWN) {
      addr.s_addr = flow->packet.iph->daddr;
      ret = ndpi_network_port_ptree_match(ndpi_str, &addr, dport);
    }
  }

  return(ret);
}

/* ********************************************************************************* */

/* Give up on full detection and fall back to guesses (see body on the next
 * lines); *protocol_was_guessed is set whenever the result is a guess. */
ndpi_protocol ndpi_detection_giveup(struct ndpi_detection_module_struct *ndpi_str, struct
ndpi_flow_struct *flow, u_int8_t enable_guess, u_int8_t *protocol_was_guessed) {
  /* Fallback classification when no dissector matched: use the guessed
     protocol / guessed host protocol, STUN heuristics and hostname
     sub-protocol matching. *protocol_was_guessed is set to 1 whenever the
     returned result comes from a guess rather than a dissector match. */
  ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED};

  *protocol_was_guessed = 0;

  if(flow == NULL)
    return(ret);

  /* Init defaults */
  ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0];
  ret.category = flow->category;

  /* Ensure that we don't change our mind if detection is already complete */
  if((ret.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN))
    return(ret);

  /* TODO: add the remaining stage_XXXX protocols */
  if(flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) {
    u_int16_t guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN, guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN;

    if(flow->guessed_protocol_id == NDPI_PROTOCOL_STUN)
      goto check_stun_export;
    else if((flow->guessed_protocol_id == NDPI_PROTOCOL_HANGOUT_DUO) ||
            (flow->guessed_protocol_id == NDPI_PROTOCOL_MESSENGER) ||
            (flow->guessed_protocol_id == NDPI_PROTOCOL_WHATSAPP_CALL)) {
      /* These guesses are trusted as-is. */
      *protocol_was_guessed = 1;
      ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_protocol_id, NDPI_PROTOCOL_UNKNOWN);
    } else if((flow->l4.tcp.tls.hello_processed == 1) &&
              (flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0')) {
      /* A TLS hello with SNI was seen: classify as TLS. */
      *protocol_was_guessed = 1;
      ndpi_set_detected_protocol(ndpi_str, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN);
    } else if(enable_guess) {
      if((flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) && (flow->packet.l4_protocol == IPPROTO_TCP) &&
         flow->l4.tcp.tls.hello_processed)
        flow->guessed_protocol_id = NDPI_PROTOCOL_TLS;

      guessed_protocol_id = flow->guessed_protocol_id, guessed_host_protocol_id = flow->guessed_host_protocol_id;

      /* Drop a UDP guess that the corresponding dissector already excluded. */
      if((guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
         ((flow->packet.l4_protocol == IPPROTO_UDP) &&
          NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_host_protocol_id) &&
          is_udp_guessable_protocol(guessed_host_protocol_id)))
        flow->guessed_host_protocol_id = guessed_host_protocol_id = NDPI_PROTOCOL_UNKNOWN;

      /* Ignore guessed protocol if they have been discarded */
      if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN)
         // && (guessed_host_protocol_id == NDPI_PROTOCOL_UNKNOWN)
         && (flow->packet.l4_protocol == IPPROTO_UDP) &&
         NDPI_ISSET(&flow->excluded_protocol_bitmask, guessed_protocol_id) &&
         is_udp_guessable_protocol(guessed_protocol_id))
        flow->guessed_protocol_id = guessed_protocol_id = NDPI_PROTOCOL_UNKNOWN;

      if((guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) || (guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) {
        if((guessed_protocol_id == 0) && (flow->protos.stun_ssl.stun.num_binding_requests > 0) &&
           (flow->protos.stun_ssl.stun.num_processed_pkts > 0))
          guessed_protocol_id = NDPI_PROTOCOL_STUN;

        if(flow->host_server_name[0] != '\0') {
          ndpi_protocol_match_result ret_match;

          memset(&ret_match, 0, sizeof(ret_match));

          ndpi_match_host_subprotocol(ndpi_str, flow, (char *) flow->host_server_name,
                                      strlen((const char *) flow->host_server_name), &ret_match,
                                      NDPI_PROTOCOL_DNS);

          if(ret_match.protocol_id != NDPI_PROTOCOL_UNKNOWN)
            guessed_host_protocol_id = ret_match.protocol_id;
        }

        *protocol_was_guessed = 1;
        ndpi_int_change_protocol(ndpi_str, flow, guessed_host_protocol_id, guessed_protocol_id);
      }
    }
  } else if(enable_guess) {
    /* Master protocol already known: fill in the missing half from guesses. */
    if(flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) {
      *protocol_was_guessed = 1;
      flow->detected_protocol_stack[1] = flow->guessed_protocol_id;
    }

    if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) {
      *protocol_was_guessed = 1;
      flow->detected_protocol_stack[0] = flow->guessed_host_protocol_id;
    }

    if(flow->detected_protocol_stack[1] == flow->detected_protocol_stack[0]) {
      *protocol_was_guessed = 1;
      flow->detected_protocol_stack[1] = flow->guessed_host_protocol_id;
    }
  }

  if((flow->detected_protocol_stack[0] == NDPI_PROTOCOL_UNKNOWN) &&
     (flow->guessed_protocol_id == NDPI_PROTOCOL_STUN)) {
  check_stun_export:
    if(flow->protos.stun_ssl.stun.num_processed_pkts || flow->protos.stun_ssl.stun.num_udp_pkts) {
      // if(/* (flow->protos.stun_ssl.stun.num_processed_pkts >= NDPI_MIN_NUM_STUN_DETECTION) */
      *protocol_was_guessed = 1;
      ndpi_set_detected_protocol(ndpi_str, flow, flow->guessed_host_protocol_id, NDPI_PROTOCOL_STUN);
    }
  }

  ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0];

  if(ret.master_protocol == NDPI_PROTOCOL_STUN) {
    if(ret.app_protocol == NDPI_PROTOCOL_FACEBOOK)
      ret.app_protocol = NDPI_PROTOCOL_MESSENGER;
    else if(ret.app_protocol == NDPI_PROTOCOL_GOOGLE) {
      /*
        As Google has recently introduced Duo,
        we need to distinguish between it and hangout
        thing that should be handled by the STUN dissector
      */
      ret.app_protocol = NDPI_PROTOCOL_HANGOUT_DUO;
    }
  }

  if(ret.app_protocol != NDPI_PROTOCOL_UNKNOWN) {
    *protocol_was_guessed = 1;
    ndpi_fill_protocol_category(ndpi_str, flow, &ret);
  }

  return(ret);
}

/* ********************************************************************************* */

/* Feed one additional packet of an already-classified flow to its
 * extra_packets_func (e.g. to keep extracting metadata), honouring
 * max_extra_packets_to_check. */
void ndpi_process_extra_packet(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                               const unsigned char *packet, const unsigned short packetlen,
                               const u_int64_t current_time_ms, struct ndpi_id_struct *src,
                               struct ndpi_id_struct *dst) {
  if(flow == NULL)
    return;

  if(flow->server_id == NULL)
    flow->server_id = dst; /* Default */

  /* need at least 20 bytes for ip header */
  if(packetlen < 20) {
    return;
  }

  flow->packet.current_time_ms = current_time_ms;

  /* parse packet */
  flow->packet.iph = (struct ndpi_iphdr *) packet;
  /* we are interested in ipv4 packet */

  /* set up the packet headers for the extra packet function to use if it wants */
  if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0)
    return;

  /* detect traffic for tcp or udp only */
  flow->src = src, flow->dst = dst;

  ndpi_connection_tracking(ndpi_str, flow);

  /* call the extra packet function (which may add more data/info to flow) */
  if(flow->extra_packets_func) {
    if((flow->extra_packets_func(ndpi_str, flow)) == 0)
flow->check_extra_packets = 0; if(++flow->num_extra_packets_checked == flow->max_extra_packets_to_check) flow->extra_packets_func = NULL; /* Enough packets detected */ } } /* ********************************************************************************* */ int ndpi_load_ip_category(struct ndpi_detection_module_struct *ndpi_str, const char *ip_address_and_mask, ndpi_protocol_category_t category) { patricia_node_t *node; struct in_addr pin; int bits = 32; char *ptr; char ipbuf[64]; strncpy(ipbuf, ip_address_and_mask, sizeof(ipbuf)); ipbuf[sizeof(ipbuf) - 1] = '\0'; ptr = strrchr(ipbuf, '/'); if(ptr) { *(ptr++) = '\0'; if(atoi(ptr) >= 0 && atoi(ptr) <= 32) bits = atoi(ptr); } if(inet_pton(AF_INET, ipbuf, &pin) != 1) { NDPI_LOG_DBG2(ndpi_str, "Invalid ip/ip+netmask: %s\n", ip_address_and_mask); return(-1); } if((node = add_to_ptree(ndpi_str->custom_categories.ipAddresses_shadow, AF_INET, &pin, bits)) != NULL) { node->value.uv.user_value = (u_int16_t)category, node->value.uv.additional_user_value = 0; } return(0); } /* ********************************************************************************* */ int ndpi_load_hostname_category(struct ndpi_detection_module_struct *ndpi_str, const char *name_to_add, ndpi_protocol_category_t category) { char *name; if(name_to_add == NULL) return(-1); name = ndpi_strdup(name_to_add); if(name == NULL) return(-1); #if 0 printf("===> %s() Loading %s as %u\n", __FUNCTION__, name, category); #endif AC_PATTERN_t ac_pattern; AC_ERROR_t rc; memset(&ac_pattern, 0, sizeof(ac_pattern)); if(ndpi_str->custom_categories.hostnames_shadow.ac_automa == NULL) { free(name); return(-1); } ac_pattern.astring = name, ac_pattern.length = strlen(ac_pattern.astring); ac_pattern.rep.number = (u_int32_t) category, ac_pattern.rep.category = category;; rc = ac_automata_add(ndpi_str->custom_categories.hostnames_shadow.ac_automa, &ac_pattern); if(rc != ACERR_DUPLICATE_PATTERN && rc != ACERR_SUCCESS) { free(name); return(-1); } if(rc == ACERR_DUPLICATE_PATTERN) 
free(name); return(0); } /* ********************************************************************************* */ /* Loads an IP or name category */ int ndpi_load_category(struct ndpi_detection_module_struct *ndpi_struct, const char *ip_or_name, ndpi_protocol_category_t category) { int rv; /* Try to load as IP address first */ rv = ndpi_load_ip_category(ndpi_struct, ip_or_name, category); if(rv < 0) { /* IP load failed, load as hostname */ rv = ndpi_load_hostname_category(ndpi_struct, ip_or_name, category); } return(rv); } /* ********************************************************************************* */ int ndpi_enable_loaded_categories(struct ndpi_detection_module_struct *ndpi_str) { int i; /* First add the nDPI known categories matches */ for (i = 0; category_match[i].string_to_match != NULL; i++) ndpi_load_category(ndpi_str, category_match[i].string_to_match, category_match[i].protocol_category); /* Free */ ac_automata_release((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames.ac_automa, 1 /* free patterns strings memory */); /* Finalize */ ac_automata_finalize((AC_AUTOMATA_t *) ndpi_str->custom_categories.hostnames_shadow.ac_automa); /* Swap */ ndpi_str->custom_categories.hostnames.ac_automa = ndpi_str->custom_categories.hostnames_shadow.ac_automa; /* Realloc */ ndpi_str->custom_categories.hostnames_shadow.ac_automa = ac_automata_init(ac_match_handler); if(ndpi_str->custom_categories.ipAddresses != NULL) ndpi_Destroy_Patricia((patricia_tree_t *) ndpi_str->custom_categories.ipAddresses, free_ptree_data); ndpi_str->custom_categories.ipAddresses = ndpi_str->custom_categories.ipAddresses_shadow; ndpi_str->custom_categories.ipAddresses_shadow = ndpi_New_Patricia(32 /* IPv4 */); ndpi_str->custom_categories.categories_loaded = 1; return(0); } /* ********************************************************************************* */ int ndpi_fill_ip_protocol_category(struct ndpi_detection_module_struct *ndpi_str, u_int32_t saddr, u_int32_t daddr, ndpi_protocol 
*ret) {
  /* Resolve the custom category for saddr (then daddr) via the custom IP
     patricia tree; otherwise fall back to the protocol's default category.
     Returns 1 on an IP-based match, 0 otherwise. */
  if(ndpi_str->custom_categories.categories_loaded) {
    prefix_t prefix;
    patricia_node_t *node;

    if(saddr == 0)
      node = NULL;
    else {
      /* Make sure all in network byte order otherwise compares wont work */
      fill_prefix_v4(&prefix, (struct in_addr *) &saddr, 32,
                     ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
      node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
    }

    if(!node) {
      if(daddr != 0) {
        fill_prefix_v4(&prefix, (struct in_addr *) &daddr, 32,
                       ((patricia_tree_t *) ndpi_str->protocols_ptree)->maxbits);
        node = ndpi_patricia_search_best(ndpi_str->custom_categories.ipAddresses, &prefix);
      }
    }

    if(node) {
      ret->category = (ndpi_protocol_category_t) node->value.uv.user_value;
      return(1);
    }
  }

  ret->category = ndpi_get_proto_category(ndpi_str, *ret);

  return(0);
}

/* ********************************************************************************* */

/* Fill flow->category / ret->category. Priority: category guessed from the
 * IP header, then a custom hostname match, then a custom match on the TLS
 * SNI, finally the protocol's default category. */
void ndpi_fill_protocol_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                                 ndpi_protocol *ret) {
  if(ndpi_str->custom_categories.categories_loaded) {
    if(flow->guessed_header_category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) {
      flow->category = ret->category = flow->guessed_header_category;
      return;
    }

    if(flow->host_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str, (char *) flow->host_server_name,
                                          strlen((char *) flow->host_server_name), &id);

      if(rc == 0) {
        flow->category = ret->category = (ndpi_protocol_category_t) id;
        return;
      }
    }

    if(flow->l4.tcp.tls.hello_processed == 1 &&
       flow->protos.stun_ssl.ssl.client_requested_server_name[0] != '\0') {
      u_int32_t id;
      int rc = ndpi_match_custom_category(ndpi_str,
                                          (char *) flow->protos.stun_ssl.ssl.client_requested_server_name,
                                          strlen(flow->protos.stun_ssl.ssl.client_requested_server_name), &id);

      if(rc == 0) {
        flow->category = ret->category = (ndpi_protocol_category_t) id;
        return;
      }
    }
  }

  flow->category = ret->category = ndpi_get_proto_category(ndpi_str, *ret);
}

/*
 ********************************************************************************* */

/* Clear every per-packet HTTP line-parsing result before a new packet is
 * dissected. */
static void ndpi_reset_packet_line_info(struct ndpi_packet_struct *packet) {
  packet->parsed_lines = 0, packet->empty_line_position_set = 0, packet->host_line.ptr = NULL,
    packet->host_line.len = 0, packet->referer_line.ptr = NULL, packet->referer_line.len = 0,
    packet->content_line.ptr = NULL, packet->content_line.len = 0, packet->accept_line.ptr = NULL,
    packet->accept_line.len = 0, packet->user_agent_line.ptr = NULL, packet->user_agent_line.len = 0,
    packet->http_url_name.ptr = NULL, packet->http_url_name.len = 0, packet->http_encoding.ptr = NULL,
    packet->http_encoding.len = 0, packet->http_transfer_encoding.ptr = NULL,
    packet->http_transfer_encoding.len = 0, packet->http_contentlen.ptr = NULL, packet->http_contentlen.len = 0,
    packet->content_disposition_line.ptr = NULL, packet->content_disposition_line.len = 0,
    packet->http_cookie.ptr = NULL, packet->http_cookie.len = 0, packet->http_origin.len = 0,
    packet->http_origin.ptr = NULL, packet->http_x_session_type.ptr = NULL, packet->http_x_session_type.len = 0,
    packet->server_line.ptr = NULL, packet->server_line.len = 0, packet->http_method.ptr = NULL,
    packet->http_method.len = 0, packet->http_response.ptr = NULL, packet->http_response.len = 0,
    packet->http_num_headers = 0;
}

/* ********************************************************************************* */

/* Whitelist of expected-protocol/observed-protocol pairs that should NOT be
 * reported as a protocol-on-unusual-port mismatch; e.g. TLS seen where
 * IMAPS/POPS/SMTPS was expected is legitimate. Returns 1 for an acceptable
 * exception, 0 otherwise. */
static int ndpi_check_protocol_port_mismatch_exceptions(struct ndpi_detection_module_struct *ndpi_str,
                                                        struct ndpi_flow_struct *flow,
                                                        ndpi_default_ports_tree_node_t *expected_proto,
                                                        ndpi_protocol *returned_proto) {
  /*
    For TLS (and other protocols) it is not simple to guess the exact protocol so before
    triggering an alert we need to make sure what we have exhausted all the possible
    options available
  */
  if(returned_proto->master_protocol == NDPI_PROTOCOL_TLS) {
    switch(expected_proto->proto->protoId) {
    case NDPI_PROTOCOL_MAIL_IMAPS:
    case NDPI_PROTOCOL_MAIL_POPS:
    case NDPI_PROTOCOL_MAIL_SMTPS:
      return(1); /* This is a reasonable exception */
      break;
    }
  }

  return(0);
}

/* ********************************************************************************* */

/* Post-detection fixups needing cross-flow knowledge: remember hosts doing
 * MS Teams in an LRU cache, so that Skype(-call) detections from the same
 * host seen shortly afterwards can be re-labelled as Teams. */
static void ndpi_reconcile_protocols(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                                     ndpi_protocol *ret) {
  /* Skype for a host doing MS Teams means MS Teams (MS Teams uses Skype as transport protocol for voice/video) */

  if(flow) {
    /* Do not go for DNS when there is an application protocol. Example DNS.Apple */
    if((flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN) &&
       (flow->detected_protocol_stack[0] /* app */ != flow->detected_protocol_stack[1] /* major */))
      NDPI_CLR_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN);
  }

  switch(ret->app_protocol) {
  case NDPI_PROTOCOL_MSTEAMS:
    if(flow->packet.iph && flow->packet.tcp) {
      // printf("====>> NDPI_PROTOCOL_MSTEAMS\n");

      if(ndpi_str->msteams_cache == NULL)
        ndpi_str->msteams_cache = ndpi_lru_cache_init(1024);

      if(ndpi_str->msteams_cache)
        /* Key: source IP; value: a 16-bit truncated "seconds" timestamp. */
        ndpi_lru_add_to_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr,
                              (flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
    }
    break;

  case NDPI_PROTOCOL_SKYPE:
  case NDPI_PROTOCOL_SKYPE_CALL:
    if(flow->packet.iph && flow->packet.udp && ndpi_str->msteams_cache) {
      u_int16_t when;

      if(ndpi_lru_find_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr, &when,
                             0 /* Don't remove it as it can be used for other connections */)) {
        u_int16_t tdiff = ((flow->packet.current_time_ms / 1000) & 0xFFFF) - when;

        if(tdiff < 60 /* sec */) {
          // printf("====>> NDPI_PROTOCOL_SKYPE(_CALL) -> NDPI_PROTOCOL_MSTEAMS [%u]\n", tdiff);
          ret->app_protocol = NDPI_PROTOCOL_MSTEAMS;

          /* Refresh cache */
          ndpi_lru_add_to_cache(ndpi_str->msteams_cache, flow->packet.iph->saddr,
                                (flow->packet.current_time_ms / 1000) & 0xFFFF /* 16 bit */);
        }
      }
    }
    break;
  } /* switch */
}

/* ********************************************************************************* */

ndpi_protocol ndpi_detection_process_packet(struct
ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, const unsigned char *packet, const unsigned short packetlen, const u_int64_t current_time_ms, struct ndpi_id_struct *src, struct ndpi_id_struct *dst) { NDPI_SELECTION_BITMASK_PROTOCOL_SIZE ndpi_selection_packet; u_int32_t a; ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; if(ndpi_str->ndpi_log_level >= NDPI_LOG_TRACE) NDPI_LOG(flow ? flow->detected_protocol_stack[0] : NDPI_PROTOCOL_UNKNOWN, ndpi_str, NDPI_LOG_TRACE, "START packet processing\n"); if(flow == NULL) return(ret); else ret.category = flow->category; flow->num_processed_pkts++; /* Init default */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; if(flow->server_id == NULL) flow->server_id = dst; /* Default */ if(flow->detected_protocol_stack[0] != NDPI_PROTOCOL_UNKNOWN) { if(flow->check_extra_packets) { ndpi_process_extra_packet(ndpi_str, flow, packet, packetlen, current_time_ms, src, dst); /* Update in case of new match */ ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0], ret.category = flow->category; goto invalidate_ptr; } else goto ret_protocols; } /* need at least 20 bytes for ip header */ if(packetlen < 20) { /* reset protocol which is normally done in init_packet_header */ ndpi_int_reset_packet_protocol(&flow->packet); goto invalidate_ptr; } flow->packet.current_time_ms = current_time_ms; /* parse packet */ flow->packet.iph = (struct ndpi_iphdr *) packet; /* we are interested in ipv4 packet */ if(ndpi_init_packet_header(ndpi_str, flow, packetlen) != 0) goto invalidate_ptr; /* detect traffic for tcp or udp only */ flow->src = src, flow->dst = dst; ndpi_connection_tracking(ndpi_str, flow); /* build ndpi_selection packet bitmask */ ndpi_selection_packet = NDPI_SELECTION_BITMASK_PROTOCOL_COMPLETE_TRAFFIC; if(flow->packet.iph != NULL) ndpi_selection_packet |= 
NDPI_SELECTION_BITMASK_PROTOCOL_IP | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6; if(flow->packet.tcp != NULL) ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP); if(flow->packet.udp != NULL) ndpi_selection_packet |= (NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP | NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP); if(flow->packet.payload_packet_len != 0) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_HAS_PAYLOAD; if(flow->packet.tcp_retransmission == 0) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_NO_TCP_RETRANSMISSION; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(flow->packet.iphv6 != NULL) ndpi_selection_packet |= NDPI_SELECTION_BITMASK_PROTOCOL_IPV6 | NDPI_SELECTION_BITMASK_PROTOCOL_IPV4_OR_IPV6; #endif /* NDPI_DETECTION_SUPPORT_IPV6 */ if((!flow->protocol_id_already_guessed) && ( #ifdef NDPI_DETECTION_SUPPORT_IPV6 flow->packet.iphv6 || #endif flow->packet.iph)) { u_int16_t sport, dport; u_int8_t protocol; u_int8_t user_defined_proto; flow->protocol_id_already_guessed = 1; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(flow->packet.iphv6 != NULL) { protocol = flow->packet.iphv6->ip6_hdr.ip6_un1_nxt; } else #endif { protocol = flow->packet.iph->protocol; } if(flow->packet.udp) sport = ntohs(flow->packet.udp->source), dport = ntohs(flow->packet.udp->dest); else if(flow->packet.tcp) sport = ntohs(flow->packet.tcp->source), dport = ntohs(flow->packet.tcp->dest); else sport = dport = 0; /* guess protocol */ flow->guessed_protocol_id = (int16_t) ndpi_guess_protocol_id(ndpi_str, flow, protocol, sport, dport, &user_defined_proto); flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow); if(ndpi_str->custom_categories.categories_loaded && flow->packet.iph) { ndpi_fill_ip_protocol_category(ndpi_str, flow->packet.iph->saddr, flow->packet.iph->daddr, &ret); flow->guessed_header_category = ret.category; } else flow->guessed_header_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED; 
if(flow->guessed_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) { /* This is a custom protocol and it has priority over everything else */ ret.master_protocol = NDPI_PROTOCOL_UNKNOWN, ret.app_protocol = flow->guessed_protocol_id ? flow->guessed_protocol_id : flow->guessed_host_protocol_id; ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } if(user_defined_proto && flow->guessed_protocol_id != NDPI_PROTOCOL_UNKNOWN) { if(flow->packet.iph) { if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { u_int8_t protocol_was_guessed; /* ret.master_protocol = flow->guessed_protocol_id , ret.app_protocol = flow->guessed_host_protocol_id; /\* ****** *\/ */ ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed); } ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } } else { /* guess host protocol */ if(flow->packet.iph) { flow->guessed_host_protocol_id = ndpi_guess_host_protocol_id(ndpi_str, flow); /* We could implement a shortcut here skipping dissectors for protocols we have identified by other means such as with the IP However we do NOT stop here and skip invoking the dissectors because we want to dissect the flow (e.g. dissect the TLS) and extract metadata. 
*/ #if SKIP_INVOKING_THE_DISSECTORS if(flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) { /* We have identified a protocol using the IP address so it is not worth to dissect the traffic as we already have the solution */ ret.master_protocol = flow->guessed_protocol_id, ret.app_protocol = flow->guessed_host_protocol_id; } #endif } } } if(flow->guessed_host_protocol_id >= NDPI_MAX_SUPPORTED_PROTOCOLS) { /* This is a custom protocol and it has priority over everything else */ ret.master_protocol = flow->guessed_protocol_id, ret.app_protocol = flow->guessed_host_protocol_id; ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet); ndpi_fill_protocol_category(ndpi_str, flow, &ret); goto invalidate_ptr; } ndpi_check_flow_func(ndpi_str, flow, &ndpi_selection_packet); a = flow->packet.detected_protocol_stack[0]; if(NDPI_COMPARE_PROTOCOL_TO_BITMASK(ndpi_str->detection_bitmask, a) == 0) a = NDPI_PROTOCOL_UNKNOWN; if(a != NDPI_PROTOCOL_UNKNOWN) { int i; for (i = 0; i < sizeof(flow->host_server_name); i++) { if(flow->host_server_name[i] != '\0') flow->host_server_name[i] = tolower(flow->host_server_name[i]); else { flow->host_server_name[i] = '\0'; break; } } } ret_protocols: if(flow->detected_protocol_stack[1] != NDPI_PROTOCOL_UNKNOWN) { ret.master_protocol = flow->detected_protocol_stack[1], ret.app_protocol = flow->detected_protocol_stack[0]; if(ret.app_protocol == ret.master_protocol) ret.master_protocol = NDPI_PROTOCOL_UNKNOWN; } else ret.app_protocol = flow->detected_protocol_stack[0]; /* Don't overwrite the category if already set */ if((flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN)) ndpi_fill_protocol_category(ndpi_str, flow, &ret); else ret.category = flow->category; if((flow->num_processed_pkts == 1) && (ret.master_protocol == NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol == NDPI_PROTOCOL_UNKNOWN) && flow->packet.tcp && (flow->packet.tcp->syn == 0) && (flow->guessed_protocol_id == 0)) { u_int8_t 
protocol_was_guessed; /* This is a TCP flow - whose first packet is NOT a SYN - no protocol has been detected We don't see how future packets can match anything hence we giveup here */ ret = ndpi_detection_giveup(ndpi_str, flow, 0, &protocol_was_guessed); } if((ret.master_protocol == NDPI_PROTOCOL_UNKNOWN) && (ret.app_protocol != NDPI_PROTOCOL_UNKNOWN) && (flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN)) { ret.master_protocol = ret.app_protocol; ret.app_protocol = flow->guessed_host_protocol_id; } if((!flow->risk_checked) && (ret.master_protocol != NDPI_PROTOCOL_UNKNOWN)) { ndpi_default_ports_tree_node_t *found; u_int16_t *default_ports, sport, dport; if(flow->packet.udp) found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_UDP, sport = ntohs(flow->packet.udp->source), dport = ntohs(flow->packet.udp->dest)), default_ports = ndpi_str->proto_defaults[ret.master_protocol].udp_default_ports; else if(flow->packet.tcp) found = ndpi_get_guessed_protocol_id(ndpi_str, IPPROTO_TCP, sport = ntohs(flow->packet.tcp->source), dport = ntohs(flow->packet.tcp->dest)), default_ports = ndpi_str->proto_defaults[ret.master_protocol].tcp_default_ports; else found = NULL, default_ports = NULL; if(found && (found->proto->protoId != NDPI_PROTOCOL_UNKNOWN) && (found->proto->protoId != ret.master_protocol)) { // printf("******** %u / %u\n", found->proto->protoId, ret.master_protocol); if(!ndpi_check_protocol_port_mismatch_exceptions(ndpi_str, flow, found, &ret)) NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT); } else if(default_ports && (default_ports[0] != 0)) { u_int8_t found = 0, i; for(i=0; (i<MAX_DEFAULT_PORTS) && (default_ports[i] != 0); i++) { if((default_ports[i] == sport) || (default_ports[i] == dport)) { found = 1; break; } } /* for */ if(!found) { // printf("******** Invalid default port\n"); NDPI_SET_BIT(flow->risk, NDPI_KNOWN_PROTOCOL_ON_NON_STANDARD_PORT); } } flow->risk_checked = 1; } ndpi_reconcile_protocols(ndpi_str, flow, &ret); invalidate_ptr: 
/* Invalidate packet memory to avoid accessing the pointers below
   when the packet is no longer accessible */
flow->packet.iph = NULL, flow->packet.tcp = NULL, flow->packet.udp = NULL, flow->packet.payload = NULL;
ndpi_reset_packet_line_info(&flow->packet);

return(ret);
}

/* ********************************************************************************* */

/* Read a run of decimal digits from 'str' (at most 'max_chars_to_read' chars),
   returning the parsed value; '*bytes_read' is advanced by the number of digits
   consumed. Stops at the first non-digit. NOTE(review): more than ~9 digits
   silently wrap the u_int32_t accumulator. */
u_int32_t ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  val = 0;

  // cancel if eof, ' ' or line end chars are reached
  while (*str >= '0' && *str <= '9' && max_chars_to_read > 0) {
    val *= 10;
    val += *str - '0';
    str++;
    max_chars_to_read = max_chars_to_read - 1;
    *bytes_read = *bytes_read + 1;
  }

  return(val);
}

/* ********************************************************************************* */

#ifdef CODE_UNUSED
/* As ndpi_bytestream_to_number(), but also accepts a "0x"-prefixed hexadecimal
   number (currently compiled out: CODE_UNUSED). */
u_int32_t ndpi_bytestream_dec_or_hex_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  val = 0;

  if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') {
    return(ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read));
  } else {
    /*use base 16 system */
    str += 2;
    max_chars_to_read -= 2;
    *bytes_read = *bytes_read + 2;

    while (max_chars_to_read > 0) {
      if(*str >= '0' && *str <= '9') {
        val *= 16;
        val += *str - '0';
      } else if(*str >= 'a' && *str <= 'f') {
        val *= 16;
        val += *str + 10 - 'a';
      } else if(*str >= 'A' && *str <= 'F') {
        val *= 16;
        val += *str + 10 - 'A';
      } else {
        break;
      }

      str++;
      max_chars_to_read = max_chars_to_read - 1;
      *bytes_read = *bytes_read + 1;
    }
  }

  return(val);
}
#endif

/* ********************************************************************************* */

/* 64-bit variant of ndpi_bytestream_to_number(): parse a run of decimal digits
   and advance '*bytes_read' accordingly. */
u_int64_t ndpi_bytestream_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int64_t val;
  val = 0;

  // cancel if eof, ' ' or line end chars are reached
  while (max_chars_to_read > 0 && *str >= '0' && *str <= '9') {
    val *= 10;
    val += *str - '0';
    str++;
    max_chars_to_read = max_chars_to_read - 1;
    *bytes_read = *bytes_read + 1;
  }

  return(val);
}

/* ********************************************************************************* */

/* As ndpi_bytestream_to_number64(), but also accepts a "0x"-prefixed
   hexadecimal number. */
u_int64_t ndpi_bytestream_dec_or_hex_to_number64(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int64_t val;
  val = 0;

  if(max_chars_to_read <= 2 || str[0] != '0' || str[1] != 'x') {
    return(ndpi_bytestream_to_number64(str, max_chars_to_read, bytes_read));
  } else {
    /*use base 16 system */
    str += 2;
    max_chars_to_read -= 2;
    *bytes_read = *bytes_read + 2;

    while (max_chars_to_read > 0) {
      if(*str >= '0' && *str <= '9') {
        val *= 16;
        val += *str - '0';
      } else if(*str >= 'a' && *str <= 'f') {
        val *= 16;
        val += *str + 10 - 'a';
      } else if(*str >= 'A' && *str <= 'F') {
        val *= 16;
        val += *str + 10 - 'A';
      } else {
        break;
      }

      str++;
      max_chars_to_read = max_chars_to_read - 1;
      *bytes_read = *bytes_read + 1;
    }
  }

  return(val);
}

/* ********************************************************************************* */

/* Parse a dotted-quad IPv4 address ("X.X.X.X", each octet 0..255) from 'str'
   and return it in network byte order, or 0 on malformed input. */
u_int32_t ndpi_bytestream_to_ipv4(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) {
  u_int32_t val;
  u_int16_t read = 0;
  u_int16_t oldread;
  u_int32_t c;

  /* ip address must be X.X.X.X with each X between 0 and 255 */
  oldread = read;
  c = ndpi_bytestream_to_number(str, max_chars_to_read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = c << 24;
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = val + (c << 16);
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read == read || str[read] != '.')
    return(0);

  read++;
  val = val + (c << 8);
  oldread = read;
  c = ndpi_bytestream_to_number(&str[read], max_chars_to_read - read, &read);

  if(c > 255 || oldread == read || max_chars_to_read ==
read) return(0); val = val + c; *bytes_read = *bytes_read + read; return(htonl(val)); } /* ********************************************************************************* */ /* internal function for every detection to parse one packet and to increase the info buffer */ void ndpi_parse_packet_line_info(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { u_int32_t a; struct ndpi_packet_struct *packet = &flow->packet; if((packet->payload_packet_len < 3) || (packet->payload == NULL)) return; if(packet->packet_lines_parsed_complete != 0) return; packet->packet_lines_parsed_complete = 1; ndpi_reset_packet_line_info(packet); packet->line[packet->parsed_lines].ptr = packet->payload; packet->line[packet->parsed_lines].len = 0; for (a = 0; ((a+1) < packet->payload_packet_len) && (packet->parsed_lines < NDPI_MAX_PARSE_LINES_PER_PACKET); a++) { if((packet->payload[a] == 0x0d) && (packet->payload[a+1] == 0x0a)) { /* If end of line char sequence CR+NL "\r\n", process line */ if(((a + 3) < packet->payload_packet_len) && (packet->payload[a+2] == 0x0d) && (packet->payload[a+3] == 0x0a)) { /* \r\n\r\n */ int diff; /* No unsigned ! */ u_int32_t a1 = a + 4; diff = packet->payload_packet_len - a1; if(diff > 0) { diff = ndpi_min(diff, sizeof(flow->initial_binary_bytes)); memcpy(&flow->initial_binary_bytes, &packet->payload[a1], diff); flow->initial_binary_bytes_len = diff; } } packet->line[packet->parsed_lines].len = (u_int16_t)(((unsigned long) &packet->payload[a]) - ((unsigned long) packet->line[packet->parsed_lines].ptr)); /* First line of a HTTP response parsing. Expected a "HTTP/1.? ???" 
*/ if(packet->parsed_lines == 0 && packet->line[0].len >= NDPI_STATICSTRING_LEN("HTTP/1.X 200 ") && strncasecmp((const char *) packet->line[0].ptr, "HTTP/1.", NDPI_STATICSTRING_LEN("HTTP/1.")) == 0 && packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] > '0' && /* response code between 000 and 699 */ packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.X ")] < '6') { packet->http_response.ptr = &packet->line[0].ptr[NDPI_STATICSTRING_LEN("HTTP/1.1 ")]; packet->http_response.len = packet->line[0].len - NDPI_STATICSTRING_LEN("HTTP/1.1 "); packet->http_num_headers++; /* Set server HTTP response code */ if(packet->payload_packet_len >= 12) { char buf[4]; /* Set server HTTP response code */ strncpy(buf, (char *) &packet->payload[9], 3); buf[3] = '\0'; flow->http.response_status_code = atoi(buf); /* https://en.wikipedia.org/wiki/List_of_HTTP_status_codes */ if((flow->http.response_status_code < 100) || (flow->http.response_status_code > 509)) flow->http.response_status_code = 0; /* Out of range */ } } /* "Server:" header line in HTTP response */ if(packet->line[packet->parsed_lines].len > NDPI_STATICSTRING_LEN("Server:") + 1 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Server:", NDPI_STATICSTRING_LEN("Server:")) == 0) { // some stupid clients omit a space and place the servername directly after the colon if(packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")] == ' ') { packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:") + 1]; packet->server_line.len = packet->line[packet->parsed_lines].len - (NDPI_STATICSTRING_LEN("Server:") + 1); } else { packet->server_line.ptr = &packet->line[packet->parsed_lines].ptr[NDPI_STATICSTRING_LEN("Server:")]; packet->server_line.len = packet->line[packet->parsed_lines].len - NDPI_STATICSTRING_LEN("Server:"); } packet->http_num_headers++; } /* "Host:" header line in HTTP request */ if(packet->line[packet->parsed_lines].len > 6 && strncasecmp((const 
char *) packet->line[packet->parsed_lines].ptr, "Host:", 5) == 0) { // some stupid clients omit a space and place the hostname directly after the colon if(packet->line[packet->parsed_lines].ptr[5] == ' ') { packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[6]; packet->host_line.len = packet->line[packet->parsed_lines].len - 6; } else { packet->host_line.ptr = &packet->line[packet->parsed_lines].ptr[5]; packet->host_line.len = packet->line[packet->parsed_lines].len - 5; } packet->http_num_headers++; } /* "X-Forwarded-For:" header line in HTTP request. Commonly used for HTTP proxies. */ if(packet->line[packet->parsed_lines].len > 17 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Forwarded-For:", 16) == 0) { // some stupid clients omit a space and place the hostname directly after the colon if(packet->line[packet->parsed_lines].ptr[16] == ' ') { packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[17]; packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 17; } else { packet->forwarded_line.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->forwarded_line.len = packet->line[packet->parsed_lines].len - 16; } packet->http_num_headers++; } /* "Content-Type:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 14 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Type: ", 14) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type: ", 14) == 0)) { packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[14]; packet->content_line.len = packet->line[packet->parsed_lines].len - 14; while ((packet->content_line.len > 0) && (packet->content_line.ptr[0] == ' ')) packet->content_line.len--, packet->content_line.ptr++; packet->http_num_headers++; } /* "Content-Type:" header line in HTTP AGAIN. 
Probably a bogus response without space after ":" */ if((packet->content_line.len == 0) && (packet->line[packet->parsed_lines].len > 13) && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-type:", 13) == 0)) { packet->content_line.ptr = &packet->line[packet->parsed_lines].ptr[13]; packet->content_line.len = packet->line[packet->parsed_lines].len - 13; packet->http_num_headers++; } if(packet->content_line.len > 0) { /* application/json; charset=utf-8 */ char separator[] = {';', '\r', '\0'}; int i; for (i = 0; separator[i] != '\0'; i++) { char *c = memchr((char *) packet->content_line.ptr, separator[i], packet->content_line.len); if(c != NULL) packet->content_line.len = c - (char *) packet->content_line.ptr; } } /* "Accept:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept: ", 8) == 0) { packet->accept_line.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->accept_line.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "Referer:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 9 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Referer: ", 9) == 0) { packet->referer_line.ptr = &packet->line[packet->parsed_lines].ptr[9]; packet->referer_line.len = packet->line[packet->parsed_lines].len - 9; packet->http_num_headers++; } /* "User-Agent:" header line in HTTP request. */ if(packet->line[packet->parsed_lines].len > 12 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-Agent: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "User-agent: ", 12) == 0)) { packet->user_agent_line.ptr = &packet->line[packet->parsed_lines].ptr[12]; packet->user_agent_line.len = packet->line[packet->parsed_lines].len - 12; packet->http_num_headers++; } /* "Content-Encoding:" header line in HTTP response (and request?). 
*/ if(packet->line[packet->parsed_lines].len > 18 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Encoding: ", 18) == 0) { packet->http_encoding.ptr = &packet->line[packet->parsed_lines].ptr[18]; packet->http_encoding.len = packet->line[packet->parsed_lines].len - 18; packet->http_num_headers++; } /* "Transfer-Encoding:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 19 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Transfer-Encoding: ", 19) == 0) { packet->http_transfer_encoding.ptr = &packet->line[packet->parsed_lines].ptr[19]; packet->http_transfer_encoding.len = packet->line[packet->parsed_lines].len - 19; packet->http_num_headers++; } /* "Content-Length:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 16 && ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Length: ", 16) == 0) || (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "content-length: ", 16) == 0))) { packet->http_contentlen.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->http_contentlen.len = packet->line[packet->parsed_lines].len - 16; packet->http_num_headers++; } /* "Content-Disposition"*/ if(packet->line[packet->parsed_lines].len > 21 && ((strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Content-Disposition: ", 21) == 0))) { packet->content_disposition_line.ptr = &packet->line[packet->parsed_lines].ptr[21]; packet->content_disposition_line.len = packet->line[packet->parsed_lines].len - 21; packet->http_num_headers++; } /* "Cookie:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Cookie: ", 8) == 0) { packet->http_cookie.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->http_cookie.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "Origin:" header line in HTTP. 
*/ if(packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Origin: ", 8) == 0) { packet->http_origin.ptr = &packet->line[packet->parsed_lines].ptr[8]; packet->http_origin.len = packet->line[packet->parsed_lines].len - 8; packet->http_num_headers++; } /* "X-Session-Type:" header line in HTTP. */ if(packet->line[packet->parsed_lines].len > 16 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "X-Session-Type: ", 16) == 0) { packet->http_x_session_type.ptr = &packet->line[packet->parsed_lines].ptr[16]; packet->http_x_session_type.len = packet->line[packet->parsed_lines].len - 16; packet->http_num_headers++; } /* Identification and counting of other HTTP headers. * We consider the most common headers, but there are many others, * which can be seen at references below: * - https://tools.ietf.org/html/rfc7230 * - https://en.wikipedia.org/wiki/List_of_HTTP_header_fields */ if((packet->line[packet->parsed_lines].len > 6 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Date: ", 6) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Vary: ", 6) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "ETag: ", 6) == 0)) || (packet->line[packet->parsed_lines].len > 8 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Pragma: ", 8) == 0) || (packet->line[packet->parsed_lines].len > 9 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Expires: ", 9) == 0) || (packet->line[packet->parsed_lines].len > 12 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Set-Cookie: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Keep-Alive: ", 12) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Connection: ", 12) == 0)) || (packet->line[packet->parsed_lines].len > 15 && (strncasecmp((const char *) 
packet->line[packet->parsed_lines].ptr, "Last-Modified: ", 15) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Ranges: ", 15) == 0)) || (packet->line[packet->parsed_lines].len > 17 && (strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Language: ", 17) == 0 || strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Accept-Encoding: ", 17) == 0)) || (packet->line[packet->parsed_lines].len > 27 && strncasecmp((const char *) packet->line[packet->parsed_lines].ptr, "Upgrade-Insecure-Requests: ", 27) == 0)) { /* Just count. In the future, if needed, this if can be splited to parse these headers */ packet->http_num_headers++; } if(packet->line[packet->parsed_lines].len == 0) { packet->empty_line_position = a; packet->empty_line_position_set = 1; } if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1)) return; packet->parsed_lines++; packet->line[packet->parsed_lines].ptr = &packet->payload[a + 2]; packet->line[packet->parsed_lines].len = 0; a++; /* next char in the payload */ } } if(packet->parsed_lines >= 1) { packet->line[packet->parsed_lines].len = (u_int16_t)(((unsigned long) &packet->payload[packet->payload_packet_len]) - ((unsigned long) packet->line[packet->parsed_lines].ptr)); packet->parsed_lines++; } } /* ********************************************************************************* */ void ndpi_parse_packet_line_info_any(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t a; u_int16_t end = packet->payload_packet_len; if(packet->packet_lines_parsed_complete != 0) return; packet->packet_lines_parsed_complete = 1; packet->parsed_lines = 0; if(packet->payload_packet_len == 0) return; packet->line[packet->parsed_lines].ptr = packet->payload; packet->line[packet->parsed_lines].len = 0; for (a = 0; a < end; a++) { if(packet->payload[a] == 0x0a) { packet->line[packet->parsed_lines].len = 
(u_int16_t)( ((unsigned long) &packet->payload[a]) - ((unsigned long) packet->line[packet->parsed_lines].ptr));

      if(a > 0 && packet->payload[a - 1] == 0x0d)
        packet->line[packet->parsed_lines].len--; /* strip the CR of a CRLF ending */

      if(packet->parsed_lines >= (NDPI_MAX_PARSE_LINES_PER_PACKET - 1))
        break;

      packet->parsed_lines++;
      packet->line[packet->parsed_lines].ptr = &packet->payload[a + 1];
      packet->line[packet->parsed_lines].len = 0;

      if((a + 1) >= packet->payload_packet_len)
        break;

      //a++;
    }
  }
}

/* ********************************************************************************* */

/* Scan the payload starting at 'counter' for something shaped like an e-mail
   address (local-part '@' domain '.' tld, tld of 2..4 lowercase letters
   followed by ' ' or ';'). Returns the offset just past the address, or 0 if
   no address is found. */
u_int16_t ndpi_check_for_email_address(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                                       u_int16_t counter) {
  struct ndpi_packet_struct *packet = &flow->packet;

  NDPI_LOG_DBG2(ndpi_str, "called ndpi_check_for_email_address\n");

  if(packet->payload_packet_len > counter &&
     ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
      (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
      (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
      packet->payload[counter] == '-' || packet->payload[counter] == '_')) {
    NDPI_LOG_DBG2(ndpi_str, "first letter\n");
    counter++;

    while (packet->payload_packet_len > counter &&
           ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
            (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
            (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
            packet->payload[counter] == '-' || packet->payload[counter] == '_' ||
            packet->payload[counter] == '.')) {
      NDPI_LOG_DBG2(ndpi_str, "further letter\n");
      counter++;

      if(packet->payload_packet_len > counter && packet->payload[counter] == '@') {
        NDPI_LOG_DBG2(ndpi_str, "@\n");
        counter++;

        while (packet->payload_packet_len > counter &&
               ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') ||
                (packet->payload[counter] >= 'A' && packet->payload[counter] <= 'Z') ||
                (packet->payload[counter] >= '0' && packet->payload[counter] <= '9') ||
                packet->payload[counter] == '-' || packet->payload[counter] == '_')) {
          NDPI_LOG_DBG2(ndpi_str, "letter\n");
          counter++;

          if(packet->payload_packet_len > counter && packet->payload[counter] == '.') {
            NDPI_LOG_DBG2(ndpi_str, ".\n");
            counter++;

            if(packet->payload_packet_len > counter + 1 &&
               ((packet->payload[counter] >= 'a' && packet->payload[counter] <= 'z') &&
                (packet->payload[counter + 1] >= 'a' && packet->payload[counter + 1] <= 'z'))) {
              NDPI_LOG_DBG2(ndpi_str, "two letters\n");
              counter += 2;

              if(packet->payload_packet_len > counter &&
                 (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
                NDPI_LOG_DBG2(ndpi_str, "whitespace1\n");
                return(counter);
              } else if(packet->payload_packet_len > counter && packet->payload[counter] >= 'a' &&
                        packet->payload[counter] <= 'z') {
                NDPI_LOG_DBG2(ndpi_str, "one letter\n");
                counter++;

                if(packet->payload_packet_len > counter &&
                   (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
                  NDPI_LOG_DBG2(ndpi_str, "whitespace2\n");
                  return(counter);
                } else if(packet->payload_packet_len > counter && packet->payload[counter] >= 'a' &&
                          packet->payload[counter] <= 'z') {
                  counter++;

                  if(packet->payload_packet_len > counter &&
                     (packet->payload[counter] == ' ' || packet->payload[counter] == ';')) {
                    NDPI_LOG_DBG2(ndpi_str, "whitespace3\n");
                    return(counter);
                  } else {
                    return(0);
                  }
                } else {
                  return(0);
                }
              } else {
                return(0);
              }
            } else {
              return(0);
            }
          }
        }

        return(0);
      }
    }
  }

  return(0);
}

#ifdef NDPI_ENABLE_DEBUG_MESSAGES
/* ********************************************************************************* */

/* Report the file/function/line recorded by the most recent debug log call. */
void ndpi_debug_get_last_log_function_line(struct ndpi_detection_module_struct *ndpi_str, const char **file,
                                           const char **func, u_int32_t *line) {
  *file = "";
  *func = "";

  if(ndpi_str->ndpi_debug_print_file != NULL)
    *file = ndpi_str->ndpi_debug_print_file;

  if(ndpi_str->ndpi_debug_print_function != NULL)
    *func = ndpi_str->ndpi_debug_print_function;

  *line = ndpi_str->ndpi_debug_print_line;
}
#endif

/*
********************************************************************************* */ u_int8_t ndpi_detection_get_l4(const u_int8_t *l3, u_int16_t l3_len, const u_int8_t **l4_return, u_int16_t *l4_len_return, u_int8_t *l4_protocol_return, u_int32_t flags) { return(ndpi_detection_get_l4_internal(NULL, l3, l3_len, l4_return, l4_len_return, l4_protocol_return, flags)); } /* ********************************************************************************* */ void ndpi_set_detected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { struct ndpi_id_struct *src = flow->src, *dst = flow->dst; ndpi_int_change_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol); if(src != NULL) { NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, upper_detected_protocol); if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN) NDPI_ADD_PROTOCOL_TO_BITMASK(src->detected_protocol_bitmask, lower_detected_protocol); } if(dst != NULL) { NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, upper_detected_protocol); if(lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN) NDPI_ADD_PROTOCOL_TO_BITMASK(dst->detected_protocol_bitmask, lower_detected_protocol); } } /* ********************************************************************************* */ u_int16_t ndpi_get_flow_masterprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { return(flow->detected_protocol_stack[1]); } /* ********************************************************************************* */ void ndpi_int_change_flow_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) { if(!flow) return; flow->detected_protocol_stack[0] = upper_detected_protocol, flow->detected_protocol_stack[1] = lower_detected_protocol; } /* 
********************************************************************************* */

/* Mirror of ndpi_int_change_flow_protocol() for the per-packet protocol stack. */
void ndpi_int_change_packet_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                                     u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  struct ndpi_packet_struct *packet = &flow->packet;
  /* NOTE: everything below is identically to change_flow_protocol
   *        except flow->packet If you want to change something here,
   *        don't! Change it for the flow function and apply it here
   *        as well */

  if(!packet)
    return;

  packet->detected_protocol_stack[0] = upper_detected_protocol, packet->detected_protocol_stack[1] = lower_detected_protocol;
}

/* ********************************************************************************* */

/* generic function for changing the protocol
 *
 * what it does is:
 * 1.update the flow protocol stack with the new protocol
 * 2.update the packet protocol stack with the new protocol
 */
void ndpi_int_change_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              u_int16_t upper_detected_protocol, u_int16_t lower_detected_protocol) {
  if((upper_detected_protocol == NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol != NDPI_PROTOCOL_UNKNOWN))
    upper_detected_protocol = lower_detected_protocol;

  if(upper_detected_protocol == lower_detected_protocol)
    lower_detected_protocol = NDPI_PROTOCOL_UNKNOWN;

  if((upper_detected_protocol != NDPI_PROTOCOL_UNKNOWN) && (lower_detected_protocol == NDPI_PROTOCOL_UNKNOWN)) {
    if((flow->guessed_host_protocol_id != NDPI_PROTOCOL_UNKNOWN) &&
       (upper_detected_protocol != flow->guessed_host_protocol_id)) {
      if(ndpi_str->proto_defaults[upper_detected_protocol].can_have_a_subprotocol) {
        /* Promote the guessed host protocol to app, demote the detected one to master */
        lower_detected_protocol = upper_detected_protocol;
        upper_detected_protocol = flow->guessed_host_protocol_id;
      }
    }
  }

  ndpi_int_change_flow_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol);
  ndpi_int_change_packet_protocol(ndpi_str, flow, upper_detected_protocol, lower_detected_protocol);
}

/* ********************************************************************************* */

/* Set the flow's protocol category. */
void ndpi_int_change_category(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow,
                              ndpi_protocol_category_t protocol_category) {
  flow->category = protocol_category;
}

/* ********************************************************************************* */

/* turns a packet back to unknown */
void ndpi_int_reset_packet_protocol(struct ndpi_packet_struct *packet) {
  int a;

  for (a = 0; a < NDPI_PROTOCOL_SIZE; a++)
    packet->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN;
}

/* ********************************************************************************* */

/* Reset the flow's detected protocol stack to unknown. */
void ndpi_int_reset_protocol(struct ndpi_flow_struct *flow) {
  if(flow) {
    int a;

    for (a = 0; a < NDPI_PROTOCOL_SIZE; a++)
      flow->detected_protocol_stack[a] = NDPI_PROTOCOL_UNKNOWN;
  }
}

/* ********************************************************************************* */

/* Zero an ndpi_ip_addr_t. */
void NDPI_PROTOCOL_IP_clear(ndpi_ip_addr_t *ip) {
  memset(ip, 0, sizeof(ndpi_ip_addr_t));
}

/* ********************************************************************************* */

#ifdef CODE_UNUSED
/* NTOP */
/* Return non-zero when the address is not all-zero (compiled out: CODE_UNUSED). */
int NDPI_PROTOCOL_IP_is_set(const ndpi_ip_addr_t *ip) {
  return(memcmp(ip, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", sizeof(ndpi_ip_addr_t)) != 0);
}
#endif

/* ********************************************************************************* */

/* check if the source ip address in packet and ip are equal */
/* NTOP */
int ndpi_packet_src_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) {
#ifdef NDPI_DETECTION_SUPPORT_IPV6
  /* IPv6 */
  if(packet->iphv6 != NULL) {
    if(packet->iphv6->ip6_src.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] &&
       packet->iphv6->ip6_src.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3])
      return(1);
//else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* check if the destination ip address in packet and ip are equal */ int ndpi_packet_dst_ip_eql(const struct ndpi_packet_struct *packet, const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { if(packet->iphv6->ip6_dst.u6_addr.u6_addr32[0] == ip->ipv6.u6_addr.u6_addr32[0] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[1] == ip->ipv6.u6_addr.u6_addr32[1] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[2] == ip->ipv6.u6_addr.u6_addr32[2] && packet->iphv6->ip6_dst.u6_addr.u6_addr32[3] == ip->ipv6.u6_addr.u6_addr32[3]) return(1); //else return(0); } #endif /* IPv4 */ if(packet->iph->saddr == ip->ipv4) return(1); return(0); } /* ********************************************************************************* */ /* get the source ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_src_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 /* IPv6 */ if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_src.u6_addr.u6_addr32[0]; ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_src.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_src.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_src.u6_addr.u6_addr32[3]; } else #endif /* IPv4 */ ip->ipv4 = packet->iph->saddr; } /* ********************************************************************************* */ /* get the destination ip address from packet and put it into ip */ /* NTOP */ void ndpi_packet_dst_ip_get(const struct ndpi_packet_struct *packet, ndpi_ip_addr_t *ip) { NDPI_PROTOCOL_IP_clear(ip); #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(packet->iphv6 != NULL) { ip->ipv6.u6_addr.u6_addr32[0] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[0]; 
ip->ipv6.u6_addr.u6_addr32[1] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[1]; ip->ipv6.u6_addr.u6_addr32[2] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[2]; ip->ipv6.u6_addr.u6_addr32[3] = packet->iphv6->ip6_dst.u6_addr.u6_addr32[3]; } else #endif ip->ipv4 = packet->iph->daddr; } /* ********************************************************************************* */ u_int8_t ndpi_is_ipv6(const ndpi_ip_addr_t *ip) { #ifdef NDPI_DETECTION_SUPPORT_IPV6 return(ip->ipv6.u6_addr.u6_addr32[1] != 0 || ip->ipv6.u6_addr.u6_addr32[2] != 0 || ip->ipv6.u6_addr.u6_addr32[3] != 0); #else return(0); #endif } /* ********************************************************************************* */ char *ndpi_get_ip_string(const ndpi_ip_addr_t *ip, char *buf, u_int buf_len) { const u_int8_t *a = (const u_int8_t *) &ip->ipv4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 if(ndpi_is_ipv6(ip)) { if(inet_ntop(AF_INET6, &ip->ipv6.u6_addr, buf, buf_len) == NULL) buf[0] = '\0'; return(buf); } #endif snprintf(buf, buf_len, "%u.%u.%u.%u", a[0], a[1], a[2], a[3]); return(buf); } /* ****************************************************** */ /* Returns -1 on failutre, otherwise fills parsed_ip and returns the IP version */ int ndpi_parse_ip_string(const char *ip_str, ndpi_ip_addr_t *parsed_ip) { int rv = -1; memset(parsed_ip, 0, sizeof(*parsed_ip)); if(strchr(ip_str, '.')) { if(inet_pton(AF_INET, ip_str, &parsed_ip->ipv4) > 0) rv = 4; #ifdef NDPI_DETECTION_SUPPORT_IPV6 } else { if(inet_pton(AF_INET6, ip_str, &parsed_ip->ipv6) > 0) rv = 6; #endif } return(rv); } /* ****************************************************** */ u_int16_t ntohs_ndpi_bytestream_to_number(const u_int8_t *str, u_int16_t max_chars_to_read, u_int16_t *bytes_read) { u_int16_t val = ndpi_bytestream_to_number(str, max_chars_to_read, bytes_read); return(ntohs(val)); } /* ****************************************************** */ u_int8_t ndpi_is_proto(ndpi_protocol proto, u_int16_t p) { return(((proto.app_protocol == p) || (proto.master_protocol 
== p)) ? 1 : 0); } /* ****************************************************** */ u_int16_t ndpi_get_lower_proto(ndpi_protocol proto) { return((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) ? proto.master_protocol : proto.app_protocol); } /* ****************************************************** */ ndpi_protocol ndpi_guess_undetected_protocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, u_int8_t proto, u_int32_t shost /* host byte order */, u_int16_t sport, u_int32_t dhost /* host byte order */, u_int16_t dport) { u_int32_t rc; struct in_addr addr; ndpi_protocol ret = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; u_int8_t user_defined_proto; if((proto == IPPROTO_TCP) || (proto == IPPROTO_UDP)) { rc = ndpi_search_tcp_or_udp_raw(ndpi_str, flow, proto, shost, dhost, sport, dport); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc, ret.master_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(ret.app_protocol == ret.master_protocol) ret.master_protocol = NDPI_PROTOCOL_UNKNOWN; ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } rc = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); if(rc != NDPI_PROTOCOL_UNKNOWN) { if(flow && (proto == IPPROTO_UDP) && NDPI_COMPARE_PROTOCOL_TO_BITMASK(flow->excluded_protocol_bitmask, rc) && is_udp_guessable_protocol(rc)) ; else { ret.app_protocol = rc; if(rc == NDPI_PROTOCOL_TLS) goto check_guessed_skype; else { ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } } } check_guessed_skype: addr.s_addr = htonl(shost); if(ndpi_network_ptree_match(ndpi_str, &addr) == NDPI_PROTOCOL_SKYPE) { ret.app_protocol = NDPI_PROTOCOL_SKYPE; } else { addr.s_addr = htonl(dhost); if(ndpi_network_ptree_match(ndpi_str, 
&addr) == NDPI_PROTOCOL_SKYPE) ret.app_protocol = NDPI_PROTOCOL_SKYPE; } } else ret.app_protocol = ndpi_guess_protocol_id(ndpi_str, flow, proto, sport, dport, &user_defined_proto); ret.category = ndpi_get_proto_category(ndpi_str, ret); return(ret); } /* ****************************************************** */ char *ndpi_protocol2id(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%u.%u", proto.master_protocol, proto.app_protocol); else snprintf(buf, buf_len, "%u", proto.master_protocol); } else snprintf(buf, buf_len, "%u", proto.app_protocol); return(buf); } /* ****************************************************** */ char *ndpi_protocol2name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol proto, char *buf, u_int buf_len) { if((proto.master_protocol != NDPI_PROTOCOL_UNKNOWN) && (proto.master_protocol != proto.app_protocol)) { if(proto.app_protocol != NDPI_PROTOCOL_UNKNOWN) snprintf(buf, buf_len, "%s.%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol), ndpi_get_proto_name(ndpi_str, proto.app_protocol)); else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.master_protocol)); } else snprintf(buf, buf_len, "%s", ndpi_get_proto_name(ndpi_str, proto.app_protocol)); return(buf); } /* ****************************************************** */ int ndpi_is_custom_category(ndpi_protocol_category_t category) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(1); break; default: return(0); break; } } /* ****************************************************** */ void ndpi_category_set_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t 
category, char *name) { if(!name) return; switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: snprintf(ndpi_str->custom_category_labels[0], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: snprintf(ndpi_str->custom_category_labels[1], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: snprintf(ndpi_str->custom_category_labels[2], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: snprintf(ndpi_str->custom_category_labels[3], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: snprintf(ndpi_str->custom_category_labels[4], CUSTOM_CATEGORY_LABEL_LEN, "%s", name); break; default: break; } } /* ****************************************************** */ const char *ndpi_category_get_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_category_t category) { if((!ndpi_str) || (category >= NDPI_PROTOCOL_NUM_CATEGORIES)) { static char b[24]; if(!ndpi_str) snprintf(b, sizeof(b), "NULL nDPI"); else snprintf(b, sizeof(b), "Invalid category %d", (int) category); return(b); } if((category >= NDPI_PROTOCOL_CATEGORY_CUSTOM_1) && (category <= NDPI_PROTOCOL_CATEGORY_CUSTOM_5)) { switch (category) { case NDPI_PROTOCOL_CATEGORY_CUSTOM_1: return(ndpi_str->custom_category_labels[0]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_2: return(ndpi_str->custom_category_labels[1]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_3: return(ndpi_str->custom_category_labels[2]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_4: return(ndpi_str->custom_category_labels[3]); case NDPI_PROTOCOL_CATEGORY_CUSTOM_5: return(ndpi_str->custom_category_labels[4]); case NDPI_PROTOCOL_NUM_CATEGORIES: return("Code should not use this internal constant"); default: return("Unspecified"); } } else return(categories[category]); } /* ****************************************************** */ ndpi_protocol_category_t ndpi_get_proto_category(struct ndpi_detection_module_struct *ndpi_str, 
ndpi_protocol proto) {
  /* An explicitly assigned category always wins. */
  if(proto.category != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED)
    return(proto.category);

  /* simple rule: sub protocol first, master after.
     BUGFIX: the original condition indexed proto_defaults[proto.app_protocol]
     *before* the bounds check that only happened inside the branch, an
     out-of-bounds read for an app_protocol >= the table size. The bounds
     check is now part of the condition, so an oversized app_protocol falls
     through to the master-protocol lookup instead. */
  else if((proto.master_protocol == NDPI_PROTOCOL_UNKNOWN) ||
	  ((proto.app_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) &&
	   (ndpi_str->proto_defaults[proto.app_protocol].protoCategory != NDPI_PROTOCOL_CATEGORY_UNSPECIFIED))) {
    if(proto.app_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS))
      return(ndpi_str->proto_defaults[proto.app_protocol].protoCategory);
  } else if(proto.master_protocol < (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS))
    return(ndpi_str->proto_defaults[proto.master_protocol].protoCategory);

  return(NDPI_PROTOCOL_CATEGORY_UNSPECIFIED);
}

/* ****************************************************** */

/* Map a numeric protocol id to its name; out-of-range or unnamed ids
   fall back to the NDPI_PROTOCOL_UNKNOWN entry. */
char *ndpi_get_proto_name(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) {
  if((proto_id >= ndpi_str->ndpi_num_supported_protocols) ||
     (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) ||
     (ndpi_str->proto_defaults[proto_id].protoName == NULL))
    proto_id = NDPI_PROTOCOL_UNKNOWN;

  return(ndpi_str->proto_defaults[proto_id].protoName);
}

/* ****************************************************** */

/* Map a numeric protocol id to its breed; same fallback rule as
   ndpi_get_proto_name() above. */
ndpi_protocol_breed_t ndpi_get_proto_breed(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) {
  if((proto_id >= ndpi_str->ndpi_num_supported_protocols) ||
     (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) ||
     (ndpi_str->proto_defaults[proto_id].protoName == NULL))
    proto_id = NDPI_PROTOCOL_UNKNOWN;

  return(ndpi_str->proto_defaults[proto_id].protoBreed);
}

/* ****************************************************** */

/* Human-readable name for a protocol breed. */
char *ndpi_get_proto_breed_name(struct ndpi_detection_module_struct *ndpi_str, ndpi_protocol_breed_t breed_id) {
  switch (breed_id) {
  case NDPI_PROTOCOL_SAFE:
    return("Safe");
    break;
  case NDPI_PROTOCOL_ACCEPTABLE:
    return("Acceptable");
    break;
  case NDPI_PROTOCOL_FUN:
    return("Fun");
    break;
  case NDPI_PROTOCOL_UNSAFE:
    return("Unsafe");
    break;
  case
NDPI_PROTOCOL_POTENTIALLY_DANGEROUS: return("Potentially Dangerous"); break; case NDPI_PROTOCOL_DANGEROUS: return("Dangerous"); break; case NDPI_PROTOCOL_UNRATED: default: return("Unrated"); break; } } /* ****************************************************** */ int ndpi_get_protocol_id(struct ndpi_detection_module_struct *ndpi_str, char *proto) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) if(strcasecmp(proto, ndpi_str->proto_defaults[i].protoName) == 0) return(i); return(-1); } /* ****************************************************** */ int ndpi_get_category_id(struct ndpi_detection_module_struct *ndpi_str, char *cat) { int i; for (i = 0; i < NDPI_PROTOCOL_NUM_CATEGORIES; i++) { const char *name = ndpi_category_get_name(ndpi_str, i); if(strcasecmp(cat, name) == 0) return(i); } return(-1); } /* ****************************************************** */ void ndpi_dump_protocols(struct ndpi_detection_module_struct *ndpi_str) { int i; for (i = 0; i < (int) ndpi_str->ndpi_num_supported_protocols; i++) printf("%3d %-22s %-8s %-12s %s\n", i, ndpi_str->proto_defaults[i].protoName, ndpi_get_l4_proto_name(ndpi_get_l4_proto_info(ndpi_str, i)), ndpi_get_proto_breed_name(ndpi_str, ndpi_str->proto_defaults[i].protoBreed), ndpi_category_get_name(ndpi_str, ndpi_str->proto_defaults[i].protoCategory)); } /* ****************************************************** */ /* * Find the first occurrence of find in s, where the search is limited to the * first slen characters of s. 
*/ char *ndpi_strnstr(const char *s, const char *find, size_t slen) { char c; size_t len; if((c = *find++) != '\0') { len = strnlen(find, slen); do { char sc; do { if(slen-- < 1 || (sc = *s++) == '\0') return(NULL); } while (sc != c); if(len > slen) return(NULL); } while (strncmp(s, find, len) != 0); s--; } return((char *) s); } /* ****************************************************** */ /* * Same as ndpi_strnstr but case-insensitive */ const char * ndpi_strncasestr(const char *str1, const char *str2, size_t len) { size_t str1_len = strnlen(str1, len); size_t str2_len = strlen(str2); size_t i; for(i = 0; i < (str1_len - str2_len + 1); i++){ if(str1[0] == '\0') return NULL; else if(strncasecmp(str1, str2, str2_len) == 0) return(str1); str1++; } return NULL; } /* ****************************************************** */ int ndpi_match_prefix(const u_int8_t *payload, size_t payload_len, const char *str, size_t str_len) { int rc = str_len <= payload_len ? memcmp(payload, str, str_len) == 0 : 0; return(rc); } /* ****************************************************** */ int ndpi_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, char *string_to_match, u_int string_to_match_len, ndpi_protocol_match_result *ret_match, u_int8_t is_host_match) { AC_TEXT_t ac_input_text; ndpi_automa *automa = is_host_match ? 
&ndpi_str->host_automa : &ndpi_str->content_automa; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (string_to_match_len == 0)) return(NDPI_PROTOCOL_UNKNOWN); if(!automa->ac_automa_finalized) { printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ } ac_input_text.astring = string_to_match, ac_input_text.length = string_to_match_len; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; /* We need to take into account also rc == 0 that is used for partial matches */ ret_match->protocol_id = match.number, ret_match->protocol_category = match.category, ret_match->protocol_breed = match.breed; return(rc ? 
match.number : 0); } /* **************************************** */ static u_int8_t ndpi_is_more_generic_protocol(u_int16_t previous_proto, u_int16_t new_proto) { /* Sometimes certificates are more generic than previously identified protocols */ if((previous_proto == NDPI_PROTOCOL_UNKNOWN) || (previous_proto == new_proto)) return(0); switch (previous_proto) { case NDPI_PROTOCOL_WHATSAPP_CALL: case NDPI_PROTOCOL_WHATSAPP_FILES: if(new_proto == NDPI_PROTOCOL_WHATSAPP) return(1); } return(0); } /* ****************************************************** */ static u_int16_t ndpi_automa_match_string_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, u_int16_t master_protocol_id, ndpi_protocol_match_result *ret_match, u_int8_t is_host_match) { int matching_protocol_id; struct ndpi_packet_struct *packet = &flow->packet; matching_protocol_id = ndpi_match_string_subprotocol(ndpi_str, string_to_match, string_to_match_len, ret_match, is_host_match); #ifdef DEBUG { char m[256]; int len = ndpi_min(sizeof(m), string_to_match_len); strncpy(m, string_to_match, len); m[len] = '\0'; NDPI_LOG_DBG2(ndpi_str, "[NDPI] ndpi_match_host_subprotocol(%s): %s\n", m, ndpi_str->proto_defaults[matching_protocol_id].protoName); } #endif if((matching_protocol_id != NDPI_PROTOCOL_UNKNOWN) && (!ndpi_is_more_generic_protocol(packet->detected_protocol_stack[0], matching_protocol_id))) { /* Move the protocol on slot 0 down one position */ packet->detected_protocol_stack[1] = master_protocol_id, packet->detected_protocol_stack[0] = matching_protocol_id; flow->detected_protocol_stack[0] = packet->detected_protocol_stack[0], flow->detected_protocol_stack[1] = packet->detected_protocol_stack[1]; if(flow->category == NDPI_PROTOCOL_CATEGORY_UNSPECIFIED) flow->category = ret_match->protocol_category; return(packet->detected_protocol_stack[0]); } #ifdef DEBUG string_to_match[string_to_match_len] = '\0'; 
NDPI_LOG_DBG2(ndpi_str, "[NTOP] Unable to find a match for '%s'\n", string_to_match); #endif ret_match->protocol_id = NDPI_PROTOCOL_UNKNOWN, ret_match->protocol_category = NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, ret_match->protocol_breed = NDPI_PROTOCOL_UNRATED; return(NDPI_PROTOCOL_UNKNOWN); } /* ****************************************************** */ u_int16_t ndpi_match_host_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { u_int16_t rc = ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 1); ndpi_protocol_category_t id = ret_match->protocol_category; if(ndpi_get_custom_category_match(ndpi_str, string_to_match, string_to_match_len, &id) != -1) { /* if(id != -1) */ { flow->category = ret_match->protocol_category = id; rc = master_protocol_id; } } return(rc); } /* **************************************** */ int ndpi_match_hostname_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t master_protocol, char *name, u_int name_len) { ndpi_protocol_match_result ret_match; u_int16_t subproto, what_len; char *what; if((name_len > 2) && (name[0] == '*') && (name[1] == '.')) what = &name[1], what_len = name_len - 1; else what = name, what_len = name_len; subproto = ndpi_match_host_subprotocol(ndpi_struct, flow, what, what_len, &ret_match, master_protocol); if(subproto != NDPI_PROTOCOL_UNKNOWN) { ndpi_set_detected_protocol(ndpi_struct, flow, subproto, master_protocol); ndpi_int_change_category(ndpi_struct, flow, ret_match.protocol_category); return(1); } else return(0); } /* ****************************************************** */ u_int16_t ndpi_match_content_subprotocol(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *string_to_match, u_int string_to_match_len, 
ndpi_protocol_match_result *ret_match, u_int16_t master_protocol_id) { return(ndpi_automa_match_string_subprotocol(ndpi_str, flow, string_to_match, string_to_match_len, master_protocol_id, ret_match, 0)); } /* ****************************************************** */ int ndpi_match_bigram(struct ndpi_detection_module_struct *ndpi_str, ndpi_automa *automa, char *bigram_to_match) { AC_TEXT_t ac_input_text; AC_REP_t match = {NDPI_PROTOCOL_UNKNOWN, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED, NDPI_PROTOCOL_UNRATED}; int rc; if((automa->ac_automa == NULL) || (bigram_to_match == NULL)) return(-1); if(!automa->ac_automa_finalized) { #if 1 ndpi_finalize_initalization(ndpi_str); #else printf("[%s:%d] [NDPI] Internal error: please call ndpi_finalize_initalization()\n", __FILE__, __LINE__); return(0); /* No matches */ #endif } ac_input_text.astring = bigram_to_match, ac_input_text.length = 2; rc = ac_automata_search(((AC_AUTOMATA_t *) automa->ac_automa), &ac_input_text, &match); /* As ac_automata_search can detect partial matches and continue the search process in case rc == 0 (i.e. no match), we need to check if there is a partial match and in this case return it */ if((rc == 0) && (match.number != 0)) rc = 1; return(rc ? 
match.number : 0); } /* ****************************************************** */ void ndpi_free_flow(struct ndpi_flow_struct *flow) { if(flow) { if(flow->http.url) ndpi_free(flow->http.url); if(flow->http.content_type) ndpi_free(flow->http.content_type); if(flow->http.user_agent) ndpi_free(flow->http.user_agent); if(flow->kerberos_buf.pktbuf) ndpi_free(flow->kerberos_buf.pktbuf); if(flow_is_proto(flow, NDPI_PROTOCOL_TLS)) { if(flow->protos.stun_ssl.ssl.server_names) ndpi_free(flow->protos.stun_ssl.ssl.server_names); if(flow->protos.stun_ssl.ssl.alpn) ndpi_free(flow->protos.stun_ssl.ssl.alpn); if(flow->protos.stun_ssl.ssl.tls_supported_versions) ndpi_free(flow->protos.stun_ssl.ssl.tls_supported_versions); if(flow->protos.stun_ssl.ssl.issuerDN) ndpi_free(flow->protos.stun_ssl.ssl.issuerDN); if(flow->protos.stun_ssl.ssl.subjectDN) ndpi_free(flow->protos.stun_ssl.ssl.subjectDN); if(flow->l4.tcp.tls.srv_cert_fingerprint_ctx) ndpi_free(flow->l4.tcp.tls.srv_cert_fingerprint_ctx); if(flow->protos.stun_ssl.ssl.encrypted_sni.esni) ndpi_free(flow->protos.stun_ssl.ssl.encrypted_sni.esni); } if(flow->l4_proto == IPPROTO_TCP) { if(flow->l4.tcp.tls.message.buffer) ndpi_free(flow->l4.tcp.tls.message.buffer); } ndpi_free(flow); } } /* ****************************************************** */ char *ndpi_revision() { return(NDPI_GIT_RELEASE); } /* ****************************************************** */ #ifdef WIN32 /* https://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows */ int gettimeofday(struct timeval *tp, struct timezone *tzp) { // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC) // until 00:00:00 January 1, 1970 static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL); SYSTEMTIME system_time; FILETIME file_time; uint64_t time; GetSystemTime(&system_time); SystemTimeToFileTime(&system_time, &file_time); 
time = ((uint64_t) file_time.dwLowDateTime); time += ((uint64_t) file_time.dwHighDateTime) << 32; tp->tv_sec = (long) ((time - EPOCH) / 10000000L); tp->tv_usec = (long) (system_time.wMilliseconds * 1000); return(0); } #endif int NDPI_BITMASK_COMPARE(NDPI_PROTOCOL_BITMASK a, NDPI_PROTOCOL_BITMASK b) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) { if(a.fds_bits[i] & b.fds_bits[i]) return(1); } return(0); } #ifdef CODE_UNUSED int NDPI_BITMASK_IS_EMPTY(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) if(a.fds_bits[i] != 0) return(0); return(1); } void NDPI_DUMP_BITMASK(NDPI_PROTOCOL_BITMASK a) { int i; for (i = 0; i < NDPI_NUM_FDS_BITS; i++) printf("[%d=%u]", i, a.fds_bits[i]); printf("\n"); } #endif u_int16_t ndpi_get_api_version() { return(NDPI_API_VERSION); } ndpi_proto_defaults_t *ndpi_get_proto_defaults(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->proto_defaults); } u_int ndpi_get_ndpi_num_supported_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_supported_protocols); } u_int ndpi_get_ndpi_num_custom_protocols(struct ndpi_detection_module_struct *ndpi_str) { return(ndpi_str->ndpi_num_custom_protocols); } u_int ndpi_get_ndpi_detection_module_size() { return(sizeof(struct ndpi_detection_module_struct)); } void ndpi_set_log_level(struct ndpi_detection_module_struct *ndpi_str, u_int l){ ndpi_str->ndpi_log_level = l; } /* ******************************************************************** */ /* LRU cache */ struct ndpi_lru_cache *ndpi_lru_cache_init(u_int32_t num_entries) { struct ndpi_lru_cache *c = (struct ndpi_lru_cache *) ndpi_malloc(sizeof(struct ndpi_lru_cache)); if(!c) return(NULL); c->entries = (struct ndpi_lru_cache_entry *) ndpi_calloc(num_entries, sizeof(struct ndpi_lru_cache_entry)); if(!c->entries) { ndpi_free(c); return(NULL); } else c->num_entries = num_entries; return(c); } void ndpi_lru_free_cache(struct ndpi_lru_cache *c) { ndpi_free(c->entries); ndpi_free(c); } 
u_int8_t ndpi_lru_find_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t *value, u_int8_t clean_key_when_found) { u_int32_t slot = key % c->num_entries; if(c->entries[slot].is_full) { *value = c->entries[slot].value; if(clean_key_when_found) c->entries[slot].is_full = 0; return(1); } else return(0); } void ndpi_lru_add_to_cache(struct ndpi_lru_cache *c, u_int32_t key, u_int16_t value) { u_int32_t slot = key % c->num_entries; c->entries[slot].is_full = 1, c->entries[slot].key = key, c->entries[slot].value = value; } /* ******************************************************************** */ /* This function tells if it's possible to further dissect a given flow 0 - All possible dissection has been completed 1 - Additional dissection is possible */ u_int8_t ndpi_extra_dissection_possible(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow) { u_int16_t proto = flow->detected_protocol_stack[1] ? flow->detected_protocol_stack[1] : flow->detected_protocol_stack[0]; #if 0 printf("[DEBUG] %s(%u.%u): %u\n", __FUNCTION__, flow->detected_protocol_stack[0], flow->detected_protocol_stack[1], proto); #endif switch (proto) { case NDPI_PROTOCOL_TLS: if(!flow->l4.tcp.tls.certificate_processed) return(1); /* TODO: add check for TLS 1.3 */ break; case NDPI_PROTOCOL_HTTP: if((flow->host_server_name[0] == '\0') || (flow->http.response_status_code == 0)) return(1); break; case NDPI_PROTOCOL_DNS: if(flow->protos.dns.num_answers == 0) return(1); break; case NDPI_PROTOCOL_FTP_CONTROL: case NDPI_PROTOCOL_MAIL_POP: case NDPI_PROTOCOL_MAIL_IMAP: case NDPI_PROTOCOL_MAIL_SMTP: if(flow->protos.ftp_imap_pop_smtp.password[0] == '\0') return(1); break; case NDPI_PROTOCOL_SSH: if((flow->protos.ssh.hassh_client[0] == '\0') || (flow->protos.ssh.hassh_server[0] == '\0')) return(1); break; case NDPI_PROTOCOL_TELNET: if(!flow->protos.telnet.password_detected) return(1); break; } return(0); } /* ******************************************************************** */ const 
char *ndpi_get_l4_proto_name(ndpi_l4_proto_info proto) { switch (proto) { case ndpi_l4_proto_unknown: return(""); break; case ndpi_l4_proto_tcp_only: return("TCP"); break; case ndpi_l4_proto_udp_only: return("UDP"); break; case ndpi_l4_proto_tcp_and_udp: return("TCP/UDP"); break; } return(""); } /* ******************************************************************** */ ndpi_l4_proto_info ndpi_get_l4_proto_info(struct ndpi_detection_module_struct *ndpi_struct, u_int16_t ndpi_proto_id) { if(ndpi_proto_id < ndpi_struct->ndpi_num_supported_protocols) { u_int16_t idx = ndpi_struct->proto_defaults[ndpi_proto_id].protoIdx; NDPI_SELECTION_BITMASK_PROTOCOL_SIZE bm = ndpi_struct->callback_buffer[idx].ndpi_selection_bitmask; if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP) return(ndpi_l4_proto_tcp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_UDP) return(ndpi_l4_proto_udp_only); else if(bm & NDPI_SELECTION_BITMASK_PROTOCOL_INT_TCP_OR_UDP) return(ndpi_l4_proto_tcp_and_udp); } return(ndpi_l4_proto_unknown); /* default */ } /* ******************************************************************** */ ndpi_ptree_t *ndpi_ptree_create(void) { ndpi_ptree_t *tree = (ndpi_ptree_t *) ndpi_malloc(sizeof(ndpi_ptree_t)); if(tree) { tree->v4 = ndpi_New_Patricia(32); tree->v6 = ndpi_New_Patricia(128); if((!tree->v4) || (!tree->v6)) { ndpi_ptree_destroy(tree); return(NULL); } } return(tree); } /* ******************************************************************** */ void ndpi_ptree_destroy(ndpi_ptree_t *tree) { if(tree) { if(tree->v4) ndpi_Destroy_Patricia(tree->v4, free_ptree_data); if(tree->v6) ndpi_Destroy_Patricia(tree->v6, free_ptree_data); ndpi_free(tree); } } /* ******************************************************************** */ int ndpi_ptree_insert(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, u_int8_t bits, uint user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? 
tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; if(bits > ptree->maxbits) return(-1); if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); /* Verify that the node does not already exist */ node = ndpi_patricia_search_best(ptree, &prefix); if(node && (node->prefix->bitlen == bits)) return(-2); node = ndpi_patricia_lookup(ptree, &prefix); if(node != NULL) { node->value.uv.user_value = user_data, node->value.uv.additional_user_value = 0; return(0); } return(-3); } /* ******************************************************************** */ int ndpi_ptree_match_addr(ndpi_ptree_t *tree, const ndpi_ip_addr_t *addr, uint *user_data) { u_int8_t is_v6 = ndpi_is_ipv6(addr); patricia_tree_t *ptree = is_v6 ? tree->v6 : tree->v4; prefix_t prefix; patricia_node_t *node; int bits = ptree->maxbits; if(is_v6) fill_prefix_v6(&prefix, (const struct in6_addr *) &addr->ipv6, bits, ptree->maxbits); else fill_prefix_v4(&prefix, (const struct in_addr *) &addr->ipv4, bits, ptree->maxbits); node = ndpi_patricia_search_best(ptree, &prefix); if(node) { *user_data = node->value.uv.user_value; return(0); } return(-1); } /* ******************************************************************** */ void ndpi_md5(const u_char *data, size_t data_len, u_char hash[16]) { ndpi_MD5_CTX ctx; ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, data, data_len); ndpi_MD5Final(hash, &ctx); } /* ******************************************************************** */ static int enough(int a, int b) { u_int8_t percentage = 20; if(b == 0) return(0); if(a == 0) return(1); if(b > (((a+1)*percentage)/100)) return(1); return(0); } /* ******************************************************************** */ // #define DGA_DEBUG 1 int ndpi_check_dga_name(struct ndpi_detection_module_struct *ndpi_str, struct ndpi_flow_struct *flow, char *name) { int len, rc = 0; len = strlen(name); if(len >= 
5) { int i, j, num_found = 0, num_impossible = 0, num_bigram_checks = 0, num_digits = 0, num_vowels = 0, num_words = 0; char tmp[128], *word, *tok_tmp; len = snprintf(tmp, sizeof(tmp)-1, "%s", name); if(len < 0) return(0); for(i=0, j=0; (i<len) && (j<(sizeof(tmp)-1)); i++) { tmp[j++] = tolower(name[i]); } tmp[j] = '\0'; len = j; for(word = strtok_r(tmp, ".", &tok_tmp); ; word = strtok_r(NULL, ".", &tok_tmp)) { if(!word) break; num_words++; if(strlen(word) < 3) continue; #ifdef DGA_DEBUG printf("-> %s [%s][len: %u]\n", word, name, (unsigned int)strlen(word)); #endif for(i = 0; word[i+1] != '\0'; i++) { if(isdigit(word[i])) { num_digits++; // if(!isdigit(word[i+1])) num_impossible++; continue; } switch(word[i]) { case '_': case '-': case ':': continue; break; case '.': continue; break; } switch(word[i]) { case 'a': case 'e': case 'i': case 'o': case 'u': num_vowels++; break; } if(isdigit(word[i+1])) { num_digits++; // num_impossible++; continue; } num_bigram_checks++; if(ndpi_match_bigram(ndpi_str, &ndpi_str->bigrams_automa, &word[i])) { num_found++; } else { if(ndpi_match_bigram(ndpi_str, &ndpi_str->impossible_bigrams_automa, &word[i])) { #ifdef DGA_DEBUG printf("IMPOSSIBLE %s\n", &word[i]); #endif num_impossible++; } } } /* for */ } /* for */ #ifdef DGA_DEBUG printf("[num_found: %u][num_impossible: %u][num_digits: %u][num_bigram_checks: %u][num_vowels: %u/%u]\n", num_found, num_impossible, num_digits, num_bigram_checks, num_vowels, j-num_vowels); #endif if(num_bigram_checks && ((num_found == 0) || ((num_digits > 5) && (num_words <= 3)) || enough(num_found, num_impossible))) rc = 1; if(rc && flow) NDPI_SET_BIT(flow->risk, NDPI_SUSPICIOUS_DGA_DOMAIN); #ifdef DGA_DEBUG if(rc) printf("DGA %s [num_found: %u][num_impossible: %u]\n", name, num_found, num_impossible); #endif } return(rc); }
/* Dataset separator artifact (not C code) — preserved as a comment so the
   file remains parseable:
   ./CrossVul/dataset_final_sorted/CWE-416/c/good_4218_0
   crossvul-cpp_data_good_148_0 */
/* radare - LGPL - Copyright 2011-2018 - pancake, Roc Valles, condret, killabyte */ #if 0 http://www.atmel.com/images/atmel-0856-avr-instruction-set-manual.pdf https://en.wikipedia.org/wiki/Atmel_AVR_instruction_set #endif #include <string.h> #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_asm.h> #include <r_anal.h> static RDESContext desctx; typedef struct _cpu_const_tag { const char *const key; ut8 type; ut32 value; ut8 size; } CPU_CONST; #define CPU_CONST_NONE 0 #define CPU_CONST_PARAM 1 #define CPU_CONST_REG 2 typedef struct _cpu_model_tag { const char *const model; int pc; char *inherit; struct _cpu_model_tag *inherit_cpu_p; CPU_CONST *consts[10]; } CPU_MODEL; typedef void (*inst_handler_t) (RAnal *anal, RAnalOp *op, const ut8 *buf, int len, int *fail, CPU_MODEL *cpu); typedef struct _opcodes_tag_ { const char *const name; int mask; int selector; inst_handler_t handler; int cycles; int size; ut64 type; } OPCODE_DESC; static OPCODE_DESC* avr_op_analyze(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *buf, int len, CPU_MODEL *cpu); #define CPU_MODEL_DECL(model, pc, consts) \ { \ model, \ pc, \ consts \ } #define MASK(bits) ((bits) == 32 ? 0xffffffff : (~((~((ut32) 0)) << (bits)))) #define CPU_PC_MASK(cpu) MASK((cpu)->pc) #define CPU_PC_SIZE(cpu) ((((cpu)->pc) >> 3) + ((((cpu)->pc) & 0x07) ? 1 : 0)) #define INST_HANDLER(OPCODE_NAME) static void _inst__ ## OPCODE_NAME (RAnal *anal, RAnalOp *op, const ut8 *buf, int len, int *fail, CPU_MODEL *cpu) #define INST_DECL(OP, M, SL, C, SZ, T) { #OP, (M), (SL), _inst__ ## OP, (C), (SZ), R_ANAL_OP_TYPE_ ## T } #define INST_LAST { "unknown", 0, 0, (void *) 0, 2, 1, R_ANAL_OP_TYPE_UNK } #define INST_CALL(OPCODE_NAME) _inst__ ## OPCODE_NAME (anal, op, buf, len, fail, cpu) #define INST_INVALID { *fail = 1; return; } #define INST_ASSERT(x) { if (!(x)) { INST_INVALID; } } #define ESIL_A(e, ...) 
r_strbuf_appendf (&op->esil, e, ##__VA_ARGS__) #define STR_BEGINS(in, s) strncasecmp (in, s, strlen (s)) // Following IO definitions are valid for: // ATmega8 // ATmega88 CPU_CONST cpu_reg_common[] = { { "spl", CPU_CONST_REG, 0x3d, sizeof (ut8) }, { "sph", CPU_CONST_REG, 0x3e, sizeof (ut8) }, { "sreg", CPU_CONST_REG, 0x3f, sizeof (ut8) }, { "spmcsr", CPU_CONST_REG, 0x37, sizeof (ut8) }, { NULL, 0, 0, 0 }, }; CPU_CONST cpu_memsize_common[] = { { "eeprom_size", CPU_CONST_PARAM, 512, sizeof (ut32) }, { "io_size", CPU_CONST_PARAM, 0x40, sizeof (ut32) }, { "sram_start", CPU_CONST_PARAM, 0x60, sizeof (ut32) }, { "sram_size", CPU_CONST_PARAM, 1024, sizeof (ut32) }, { NULL, 0, 0, 0 }, }; CPU_CONST cpu_memsize_m640_m1280m_m1281_m2560_m2561[] = { { "eeprom_size", CPU_CONST_PARAM, 512, sizeof (ut32) }, { "io_size", CPU_CONST_PARAM, 0x1ff, sizeof (ut32) }, { "sram_start", CPU_CONST_PARAM, 0x200, sizeof (ut32) }, { "sram_size", CPU_CONST_PARAM, 0x2000, sizeof (ut32) }, { NULL, 0, 0, 0 }, }; CPU_CONST cpu_memsize_xmega128a4u[] = { { "eeprom_size", CPU_CONST_PARAM, 0x800, sizeof (ut32) }, { "io_size", CPU_CONST_PARAM, 0x1000, sizeof (ut32) }, { "sram_start", CPU_CONST_PARAM, 0x800, sizeof (ut32) }, { "sram_size", CPU_CONST_PARAM, 0x2000, sizeof (ut32) }, { NULL, 0, 0, 0 }, }; CPU_CONST cpu_pagesize_5_bits[] = { { "page_size", CPU_CONST_PARAM, 5, sizeof (ut8) }, { NULL, 0, 0, 0 }, }; CPU_CONST cpu_pagesize_7_bits[] = { { "page_size", CPU_CONST_PARAM, 7, sizeof (ut8) }, { NULL, 0, 0, 0 }, }; CPU_MODEL cpu_models[] = { { .model = "ATmega640", .pc = 15, .consts = { cpu_reg_common, cpu_memsize_m640_m1280m_m1281_m2560_m2561, cpu_pagesize_7_bits, NULL }, }, { .model = "ATxmega128a4u", .pc = 17, .consts = { cpu_reg_common, cpu_memsize_xmega128a4u, cpu_pagesize_7_bits, NULL } }, { .model = "ATmega1280", .pc = 16, .inherit = "ATmega640" }, { .model = "ATmega1281", .pc = 16, .inherit = "ATmega640" }, { .model = "ATmega2560", .pc = 17, .inherit = "ATmega640" }, { .model = "ATmega2561", .pc = 
17, .inherit = "ATmega640" }, { .model = "ATmega88", .pc = 8, .inherit = "ATmega8" }, // CPU_MODEL_DECL ("ATmega168", 13, 512, 512), // last model is the default AVR - ATmega8 forever! { .model = "ATmega8", .pc = 13, .consts = { cpu_reg_common, cpu_memsize_common, cpu_pagesize_5_bits, NULL } }, }; static CPU_MODEL *get_cpu_model(char *model); static CPU_MODEL *__get_cpu_model_recursive(char *model) { CPU_MODEL *cpu = NULL; for (cpu = cpu_models; cpu < cpu_models + ((sizeof (cpu_models) / sizeof (CPU_MODEL))) - 1; cpu++) { if (!strcasecmp (model, cpu->model)) { break; } } // fix inheritance tree if (cpu->inherit && !cpu->inherit_cpu_p) { cpu->inherit_cpu_p = get_cpu_model (cpu->inherit); if (!cpu->inherit_cpu_p) { eprintf ("ERROR: Cannot inherit from unknown CPU model '%s'.\n", cpu->inherit); } } return cpu; } static CPU_MODEL *get_cpu_model(char *model) { static CPU_MODEL *cpu = NULL; // cached value? if (cpu && !strcasecmp (model, cpu->model)) return cpu; // do the real search cpu = __get_cpu_model_recursive (model); return cpu; } static ut32 const_get_value(CPU_CONST *c) { return c ? 
MASK (c->size * 8) & c->value : 0; } static CPU_CONST *const_by_name(CPU_MODEL *cpu, int type, char *c) { CPU_CONST **clist, *citem; for (clist = cpu->consts; *clist; clist++) { for (citem = *clist; citem->key; citem++) { if (!strcmp (c, citem->key) && (type == CPU_CONST_NONE || type == citem->type)) { return citem; } } } if (cpu->inherit_cpu_p) return const_by_name (cpu->inherit_cpu_p, type, c); eprintf ("ERROR: CONSTANT key[%s] NOT FOUND.\n", c); return NULL; } static int __esil_pop_argument(RAnalEsil *esil, ut64 *v) { char *t = r_anal_esil_pop (esil); if (!t || !r_anal_esil_get_parm (esil, t, v)) { free (t); return false; } free (t); return true; } static CPU_CONST *const_by_value(CPU_MODEL *cpu, int type, ut32 v) { CPU_CONST **clist, *citem; for (clist = cpu->consts; *clist; clist++) { for (citem = *clist; citem && citem->key; citem++) { if (citem->value == (MASK (citem->size * 8) & v) && (type == CPU_CONST_NONE || type == citem->type)) { return citem; } } } if (cpu->inherit_cpu_p) return const_by_value (cpu->inherit_cpu_p, type, v); return NULL; } static RStrBuf *__generic_io_dest(ut8 port, int write, CPU_MODEL *cpu) { RStrBuf *r = r_strbuf_new (""); CPU_CONST *c = const_by_value (cpu, CPU_CONST_REG, port); if (c != NULL) { r_strbuf_set (r, c->key); if (write) { r_strbuf_append (r, ",="); } } else { r_strbuf_setf (r, "_io,%d,+,%s[1]", port, write ? "=" : ""); } return r; } static void __generic_bitop_flags(RAnalOp *op) { ESIL_A ("0,vf,=,"); // V ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("vf,nf,^,sf,=,"); // S } static void __generic_ld_st(RAnalOp *op, char *mem, char ireg, int use_ramp, int prepostdec, int offset, int st) { if (ireg) { // preincrement index register if (prepostdec < 0) { ESIL_A ("1,%c,-,%c,=,", ireg, ireg); } // set register index address ESIL_A ("%c,", ireg); // add offset if (offset != 0) { ESIL_A ("%d,+,", offset); } } else { ESIL_A ("%d,", offset); } if (use_ramp) { ESIL_A ("16,ramp%c,<<,+,", ireg ? 
ireg : 'd'); } // set SRAM base address ESIL_A ("_%s,+,", mem); // read/write from SRAM ESIL_A ("%s[1],", st ? "=" : ""); // postincrement index register if (ireg && prepostdec > 0) { ESIL_A ("1,%c,+,%c,=,", ireg, ireg); } } static void __generic_pop(RAnalOp *op, int sz) { if (sz > 1) { ESIL_A ("1,sp,+,_ram,+,"); // calc SRAM(sp+1) ESIL_A ("[%d],", sz); // read value ESIL_A ("%d,sp,+=,", sz); // sp += item_size } else { ESIL_A ("1,sp,+=," // increment stack pointer "sp,_ram,+,[1],"); // load SRAM[sp] } } static void __generic_push(RAnalOp *op, int sz) { ESIL_A ("sp,_ram,+,"); // calc pointer SRAM(sp) if (sz > 1) { ESIL_A ("-%d,+,", sz - 1); // dec SP by 'sz' } ESIL_A ("=[%d],", sz); // store value in stack ESIL_A ("-%d,sp,+=,", sz); // decrement stack pointer } static void __generic_add_update_flags(RAnalOp *op, char t_d, ut64 v_d, char t_rk, ut64 v_rk) { RStrBuf *d_strbuf, *rk_strbuf; char *d, *rk; d_strbuf = r_strbuf_new (NULL); rk_strbuf = r_strbuf_new (NULL); r_strbuf_setf (d_strbuf, t_d == 'r' ? "r%d" : "%" PFMT64d, v_d); r_strbuf_setf (rk_strbuf, t_rk == 'r' ? 
"r%d" : "%" PFMT64d, v_rk); d = r_strbuf_get(d_strbuf); rk = r_strbuf_get(rk_strbuf); ESIL_A ("%s,0x08,&,!,!," "%s,0x08,&,!,!," "&," // H "%s,0x08,&,!,!," "0,RPICK,0x08,&,!," "&," "%s,0x08,&,!,!," "0,RPICK,0x08,&,!," "&," "|,|,hf,=,", d, rk, rk, d); ESIL_A ("%s,0x80,&,!,!," "%s,0x80,&,!,!," "&," // V "" "0,RPICK,0x80,&,!," "&," "%s,0x80,&,!," "%s,0x80,&,!," "&," "" "0,RPICK,0x80,&,!,!," "&," "|,vf,=,", d, rk, d, rk); ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("%s,0x80,&,!,!," "%s,0x80,&,!,!," "&," // C "%s,0x80,&,!,!," "0,RPICK,0x80,&,!," "&," "%s,0x80,&,!,!," "0,RPICK,0x80,&,!," "&," "|,|,cf,=,", d, rk, rk, d); ESIL_A ("vf,nf,^,sf,=,"); // S r_strbuf_free (d_strbuf); r_strbuf_free (rk_strbuf); } static void __generic_add_update_flags_rr(RAnalOp *op, int d, int r) { __generic_add_update_flags(op, 'r', d, 'r', r); } static void __generic_sub_update_flags(RAnalOp *op, char t_d, ut64 v_d, char t_rk, ut64 v_rk, int carry) { RStrBuf *d_strbuf, *rk_strbuf; char *d, *rk; d_strbuf = r_strbuf_new (NULL); rk_strbuf = r_strbuf_new (NULL); r_strbuf_setf (d_strbuf, t_d == 'r' ? "r%d" : "%" PFMT64d, v_d); r_strbuf_setf (rk_strbuf, t_rk == 'r' ? 
"r%d" : "%" PFMT64d, v_rk); d = r_strbuf_get(d_strbuf); rk = r_strbuf_get(rk_strbuf); ESIL_A ("%s,0x08,&,!," "%s,0x08,&,!,!," "&," // H "%s,0x08,&,!,!," "0,RPICK,0x08,&,!,!," "&," "%s,0x08,&,!," "0,RPICK,0x08,&,!,!," "&," "|,|,hf,=,", d, rk, rk, d); ESIL_A ("%s,0x80,&,!,!," "%s,0x80,&,!," "&," // V "" "0,RPICK,0x80,&,!," "&," "%s,0x80,&,!," "%s,0x80,&,!,!," "&," "" "0,RPICK,0x80,&,!,!," "&," "|,vf,=,", d, rk, d, rk); ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N if (carry) ESIL_A ("0,RPICK,!,zf,&,zf,=,"); // Z else ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("%s,0x80,&,!," "%s,0x80,&,!,!," "&," // C "%s,0x80,&,!,!," "0,RPICK,0x80,&,!,!," "&," "%s,0x80,&,!," "0,RPICK,0x80,&,!,!," "&," "|,|,cf,=,", d, rk, rk, d); ESIL_A ("vf,nf,^,sf,=,"); // S r_strbuf_free (d_strbuf); r_strbuf_free (rk_strbuf); } static void __generic_sub_update_flags_rr(RAnalOp *op, int d, int r, int carry) { __generic_sub_update_flags(op, 'r', d, 'r', r, carry); } static void __generic_sub_update_flags_rk(RAnalOp *op, int d, int k, int carry) { __generic_sub_update_flags(op, 'r', d, 'k', k, carry); } INST_HANDLER (adc) { // ADC Rd, Rr // ROL Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,cf,+,r%d,+,", r, d); // Rd + Rr + C __generic_add_update_flags_rr(op, d, r); // FLAGS ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (add) { // ADD Rd, Rr // LSL Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,r%d,+,", r, d); // Rd + Rr __generic_add_update_flags_rr(op, d, r); // FLAGS ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (adiw) { // ADIW Rd+1:Rd, K int d = ((buf[0] & 0x30) >> 3) + 24; int k = (buf[0] & 0xf) | ((buf[0] >> 2) & 0x30); op->val = k; ESIL_A ("r%d:r%d,%d,+,", d + 1, d, k); // Rd+1:Rd + Rr // FLAGS: ESIL_A ("r%d,0x80,&,!," // V "0,RPICK,0x8000,&,!,!," "&,vf,=,", d + 1); ESIL_A ("0,RPICK,0x8000,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A 
("r%d,0x80,&,!,!," // C "0,RPICK,0x8000,&,!," "&,cf,=,", d + 1); ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d:r%d,=,", d + 1, d); // Rd = result } INST_HANDLER (and) { // AND Rd, Rr // TST Rd if (len < 2) { return; } int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,r%d,&,", r, d); // 0: Rd & Rr __generic_bitop_flags (op); // up flags ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (andi) { // ANDI Rd, K // CBR Rd, K (= ANDI Rd, 1-K) if (len < 2) { return; } int d = ((buf[0] >> 4) & 0xf) + 16; int k = ((buf[1] & 0x0f) << 4) | (buf[0] & 0x0f); op->val = k; ESIL_A ("%d,r%d,&,", k, d); // 0: Rd & Rr __generic_bitop_flags (op); // up flags ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (asr) { // ASR Rd if (len < 2) { return; } int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); ESIL_A ("1,r%d,>>,r%d,0x80,&,|,", d, d); // 0: R=(Rd >> 1) | Rd7 ESIL_A ("r%d,0x1,&,!,!,cf,=,", d); // C = Rd0 ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("nf,cf,^,vf,=,"); // V ESIL_A ("nf,vf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = R } INST_HANDLER (bclr) { // BCLR s // CLC // CLH // CLI // CLN // CLR // CLS // CLT // CLV // CLZ int s = (buf[0] >> 4) & 0x7; ESIL_A ("0xff,%d,1,<<,^,sreg,&=,", s); } INST_HANDLER (bld) { // BLD Rd, b if (len < 2) { return; } int d = ((buf[1] & 0x01) << 4) | ((buf[0] >> 4) & 0xf); int b = buf[0] & 0x7; ESIL_A ("r%d,%d,1,<<,0xff,^,&,", d, b); // Rd/b = 0 ESIL_A ("%d,tf,<<,|,r%d,=,", b, d); // Rd/b |= T<<b } INST_HANDLER (brbx) { // BRBC s, k // BRBS s, k // BRBC/S 0: BRCC BRCS // BRSH BRLO // BRBC/S 1: BREQ BRNE // BRBC/S 2: BRPL BRMI // BRBC/S 3: BRVC BRVS // BRBC/S 4: BRGE BRLT // BRBC/S 5: BRHC BRHS // BRBC/S 6: BRTC BRTS // BRBC/S 7: BRID BRIE int s = buf[0] & 0x7; op->jump = op->addr + ((((buf[1] & 0x03) << 6) | ((buf[0] & 0xf8) >> 2)) | (buf[1] & 0x2 ? 
~((int) 0x7f) : 0)) + 2; op->fail = op->addr + op->size; op->cycles = 1; // XXX: This is a bug, because depends on eval state, // so it cannot be really be known until this // instruction is executed by the ESIL interpreter!!! // In case of evaluating to true, this instruction // needs 2 cycles, elsewhere it needs only 1 cycle. ESIL_A ("%d,1,<<,sreg,&,", s); // SREG(s) ESIL_A (buf[1] & 0x4 ? "!," // BRBC => branch if cleared : "!,!,"); // BRBS => branch if set ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump); // ?true => jmp } INST_HANDLER (break) { // BREAK ESIL_A ("BREAK"); } INST_HANDLER (bset) { // BSET s // SEC // SEH // SEI // SEN // SER // SES // SET // SEV // SEZ int s = (buf[0] >> 4) & 0x7; ESIL_A ("%d,1,<<,sreg,|=,", s); } INST_HANDLER (bst) { // BST Rd, b if (len < 2) { return; } ESIL_A ("r%d,%d,1,<<,&,!,!,tf,=,", // tf = Rd/b ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf), // r buf[0] & 0x7); // b } INST_HANDLER (call) { // CALL k if (len < 4) { return; } op->jump = (buf[2] << 1) | (buf[3] << 9) | (buf[1] & 0x01) << 23 | (buf[0] & 0x01) << 17 | (buf[0] & 0xf0) << 14; op->fail = op->addr + op->size; op->cycles = cpu->pc <= 16 ? 3 : 4; if (!STR_BEGINS (cpu->model, "ATxmega")) { op->cycles--; // AT*mega optimizes one cycle } ESIL_A ("pc,"); // esil is already pointing to // next instruction (@ret) __generic_push (op, CPU_PC_SIZE (cpu)); // push @ret in stack ESIL_A ("%"PFMT64d",pc,=,", op->jump); // jump! 
} INST_HANDLER (cbi) { // CBI A, b int a = (buf[0] >> 3) & 0x1f; int b = buf[0] & 0x07; RStrBuf *io_port; op->family = R_ANAL_OP_FAMILY_IO; op->type2 = 1; op->val = a; // read port a and clear bit b io_port = __generic_io_dest (a, 0, cpu); ESIL_A ("0xff,%d,1,<<,^,%s,&,", b, io_port); r_strbuf_free (io_port); // write result to port a io_port = __generic_io_dest (a, 1, cpu); ESIL_A ("%s,", r_strbuf_get (io_port)); r_strbuf_free (io_port); } INST_HANDLER (com) { // COM Rd int r = ((buf[0] >> 4) & 0x0f) | ((buf[1] & 1) << 4); ESIL_A ("r%d,0xff,-,0xff,&,r%d,=,", r, r); // Rd = 0xFF-Rd // FLAGS: ESIL_A ("0,cf,=,"); // C __generic_bitop_flags (op); // ...rest... } INST_HANDLER (cp) { // CP Rd, Rr int r = (buf[0] & 0x0f) | ((buf[1] << 3) & 0x10); int d = ((buf[0] >> 4) & 0x0f) | ((buf[1] << 4) & 0x10); ESIL_A ("r%d,r%d,-,", r, d); // do Rd - Rr __generic_sub_update_flags_rr (op, d, r, 0); // FLAGS (no carry) } INST_HANDLER (cpc) { // CPC Rd, Rr int r = (buf[0] & 0x0f) | ((buf[1] << 3) & 0x10); int d = ((buf[0] >> 4) & 0x0f) | ((buf[1] << 4) & 0x10); ESIL_A ("cf,r%d,+,r%d,-,", r, d); // Rd - Rr - C __generic_sub_update_flags_rr (op, d, r, 1); // FLAGS (carry) } INST_HANDLER (cpi) { // CPI Rd, K int d = ((buf[0] >> 4) & 0xf) + 16; int k = (buf[0] & 0xf) | ((buf[1] & 0xf) << 4); ESIL_A ("%d,r%d,-,", k, d); // Rd - k __generic_sub_update_flags_rk (op, d, k, 0); // FLAGS (carry) } INST_HANDLER (cpse) { // CPSE Rd, Rr int r = (buf[0] & 0xf) | ((buf[1] & 0x2) << 3); int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); RAnalOp next_op = {0}; // calculate next instruction size (call recursively avr_op_analyze) // and free next_op's esil string (we dont need it now) avr_op_analyze (anal, &next_op, op->addr + op->size, buf + op->size, len - op->size, cpu); r_strbuf_fini (&next_op.esil); op->jump = op->addr + next_op.size + 2; // cycles op->cycles = 1; // XXX: This is a bug, because depends on eval state, // so it cannot be really be known until this // instruction is executed by 
the ESIL interpreter!!! // In case of evaluating to true, this instruction // needs 2/3 cycles, elsewhere it needs only 1 cycle. ESIL_A ("r%d,r%d,^,!,", r, d); // Rr == Rd ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump); // ?true => jmp } INST_HANDLER (dec) { // DEC Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); ESIL_A ("-1,r%d,+,", d); // --Rd // FLAGS: ESIL_A ("0,RPICK,0x7f,==,vf,=,"); // V ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (des) { // DES k if (desctx.round < 16) { //DES op->type = R_ANAL_OP_TYPE_CRYPTO; op->cycles = 1; //redo this r_strbuf_setf (&op->esil, "%d,des", desctx.round); } } INST_HANDLER (eijmp) { // EIJMP ut64 z, eind; // read z and eind for calculating jump address on runtime r_anal_esil_reg_read (anal->esil, "z", &z, NULL); r_anal_esil_reg_read (anal->esil, "eind", &eind, NULL); // real target address may change during execution, so this value will // be changing all the time op->jump = ((eind << 16) + z) << 1; // jump ESIL_A ("1,z,16,eind,<<,+,<<,pc,=,"); // cycles op->cycles = 2; } INST_HANDLER (eicall) { // EICALL // push pc in stack ESIL_A ("pc,"); // esil is already pointing to // next instruction (@ret) __generic_push (op, CPU_PC_SIZE (cpu)); // push @ret in stack // do a standard EIJMP INST_CALL (eijmp); // fix cycles op->cycles = !STR_BEGINS (cpu->model, "ATxmega") ? 3 : 4; } INST_HANDLER (elpm) { // ELPM // ELPM Rd // ELPM Rd, Z+ int d = ((buf[1] & 0xfe) == 0x90) ? 
((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf) // Rd : 0; // R0 ESIL_A ("16,rampz,<<,z,+,_prog,+,[1],"); // read RAMPZ:Z ESIL_A ("r%d,=,", d); // Rd = [1] if ((buf[1] & 0xfe) == 0x90 && (buf[0] & 0xf) == 0x7) { ESIL_A ("16,1,z,+,DUP,z,=,>>,1,&,rampz,+=,"); // ++(rampz:z) } } INST_HANDLER (eor) { // EOR Rd, Rr // CLR Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,r%d,^,", r, d); // 0: Rd ^ Rr __generic_bitop_flags (op); // up flags ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (fmul) { // FMUL Rd, Rr int d = ((buf[0] >> 4) & 0x7) + 16; int r = (buf[0] & 0x7) + 16; ESIL_A ("1,r%d,r%d,*,<<,", r, d); // 0: (Rd*Rr)<<1 ESIL_A ("0xffff,&,"); // prevent overflow ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/16 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (fmuls) { // FMULS Rd, Rr int d = ((buf[0] >> 4) & 0x7) + 16; int r = (buf[0] & 0x7) + 16; ESIL_A ("1,"); ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", d); // sign extension Rd ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", r); // sign extension Rr ESIL_A ("*,<<,", r, d); // 0: (Rd*Rr)<<1 ESIL_A ("0xffff,&,"); // prevent overflow ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/16 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (fmulsu) { // FMULSU Rd, Rr int d = ((buf[0] >> 4) & 0x7) + 16; int r = (buf[0] & 0x7) + 16; ESIL_A ("1,"); ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", d); // sign extension Rd ESIL_A ("r%d", r); // unsigned Rr ESIL_A ("*,<<,"); // 0: (Rd*Rr)<<1 ESIL_A ("0xffff,&,"); // prevent overflow ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/16 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (ijmp) { // IJMP k ut64 z; // read z for calculating jump address on runtime 
r_anal_esil_reg_read (anal->esil, "z", &z, NULL); // real target address may change during execution, so this value will // be changing all the time op->jump = z << 1; op->cycles = 2; ESIL_A ("1,z,<<,pc,=,"); // jump! } INST_HANDLER (icall) { // ICALL k // push pc in stack ESIL_A ("pc,"); // esil is already pointing to // next instruction (@ret) __generic_push (op, CPU_PC_SIZE (cpu)); // push @ret in stack // do a standard IJMP INST_CALL (ijmp); // fix cycles if (!STR_BEGINS (cpu->model, "ATxmega")) { // AT*mega optimizes 1 cycle! op->cycles--; } } INST_HANDLER (in) { // IN Rd, A int r = ((buf[0] >> 4) & 0x0f) | ((buf[1] & 0x01) << 4); int a = (buf[0] & 0x0f) | ((buf[1] & 0x6) << 3); RStrBuf *io_src = __generic_io_dest (a, 0, cpu); op->type2 = 0; op->val = a; op->family = R_ANAL_OP_FAMILY_IO; ESIL_A ("%s,r%d,=,", r_strbuf_get (io_src), r); r_strbuf_free (io_src); } INST_HANDLER (inc) { // INC Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); ESIL_A ("1,r%d,+,", d); // ++Rd // FLAGS: ESIL_A ("0,RPICK,0x80,==,vf,=,"); // V ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (jmp) { // JMP k op->jump = (buf[2] << 1) | (buf[3] << 9) | (buf[1] & 0x01) << 23 | (buf[0] & 0x01) << 17 | (buf[0] & 0xf0) << 14; op->cycles = 3; ESIL_A ("%"PFMT64d",pc,=,", op->jump); // jump! 
} INST_HANDLER (lac) { // LAC Z, Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); // read memory from RAMPZ:Z __generic_ld_st (op, "ram", 'z', 1, 0, 0, 0); // 0: Read (RAMPZ:Z) ESIL_A ("r%d,0xff,^,&,", d); // 0: (Z) & ~Rd ESIL_A ("DUP,r%d,=,", d); // Rd = [0] __generic_ld_st (op, "ram", 'z', 1, 0, 0, 1); // Store in RAM } INST_HANDLER (las) { // LAS Z, Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); // read memory from RAMPZ:Z __generic_ld_st (op, "ram", 'z', 1, 0, 0, 0); // 0: Read (RAMPZ:Z) ESIL_A ("r%d,|,", d); // 0: (Z) | Rd ESIL_A ("DUP,r%d,=,", d); // Rd = [0] __generic_ld_st (op, "ram", 'z', 1, 0, 0, 1); // Store in RAM } INST_HANDLER (lat) { // LAT Z, Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); // read memory from RAMPZ:Z __generic_ld_st (op, "ram", 'z', 1, 0, 0, 0); // 0: Read (RAMPZ:Z) ESIL_A ("r%d,^,", d); // 0: (Z) ^ Rd ESIL_A ("DUP,r%d,=,", d); // Rd = [0] __generic_ld_st (op, "ram", 'z', 1, 0, 0, 1); // Store in RAM } INST_HANDLER (ld) { // LD Rd, X // LD Rd, X+ // LD Rd, -X // read memory __generic_ld_st ( op, "ram", 'x', // use index register X 0, // no use RAMP* registers (buf[0] & 0xf) == 0xe ? -1 // pre decremented : (buf[0] & 0xf) == 0xd ? 1 // post incremented : 0, // no increment 0, // offset always 0 0); // load operation (!st) // load register ESIL_A ("r%d,=,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf)); // cycles op->cycles = (buf[0] & 0x3) == 0 ? 2 // LD Rd, X : (buf[0] & 0x3) == 1 ? 2 // LD Rd, X+ : 3; // LD Rd, -X if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) { // AT*mega optimizes 1 cycle! op->cycles--; } } INST_HANDLER (ldd) { // LD Rd, Y LD Rd, Z // LD Rd, Y+ LD Rd, Z+ // LD Rd, -Y LD Rd, -Z // LD Rd, Y+q LD Rd, Z+q // calculate offset (this value only has sense in some opcodes, // but we are optimistic and we calculate it always) int offset = (buf[1] & 0x20) | ((buf[1] & 0xc) << 1) | (buf[0] & 0x7); // read memory __generic_ld_st ( op, "ram", buf[0] & 0x8 ? 
'y' : 'z', // index register Y/Z 0, // no use RAMP* registers !(buf[1] & 0x10) ? 0 // no increment : buf[0] & 0x1 ? 1 // post incremented : -1, // pre decremented !(buf[1] & 0x10) ? offset : 0, // offset or not offset 0); // load operation (!st) // load register ESIL_A ("r%d,=,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf)); // cycles op->cycles = (buf[1] & 0x10) == 0 ? (!offset ? 1 : 3) // LDD : (buf[0] & 0x3) == 0 ? 1 // LD Rd, X : (buf[0] & 0x3) == 1 ? 2 // LD Rd, X+ : 3; // LD Rd, -X if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) { // AT*mega optimizes 1 cycle! op->cycles--; } } INST_HANDLER (ldi) { // LDI Rd, K int k = (buf[0] & 0xf) + ((buf[1] & 0xf) << 4); int d = ((buf[0] >> 4) & 0xf) + 16; op->val = k; ESIL_A ("0x%x,r%d,=,", k, d); } INST_HANDLER (lds) { // LDS Rd, k if (len < 4) { return; } int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); int k = (buf[3] << 8) | buf[2]; op->ptr = k; // load value from RAMPD:k __generic_ld_st (op, "ram", 0, 1, 0, k, 0); ESIL_A ("r%d,=,", d); } INST_HANDLER (sts) { // STS k, Rr int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); int k = (buf[3] << 8) | buf[2]; op->ptr = k; ESIL_A ("r%d,", r); __generic_ld_st (op, "ram", 0, 1, 0, k, 1); op->cycles = 2; } #if 0 INST_HANDLER (lds16) { // LDS Rd, k int d = ((buf[0] >> 4) & 0xf) + 16; int k = (buf[0] & 0x0f) | ((buf[1] << 3) & 0x30) | ((buf[1] << 4) & 0x40) | (~(buf[1] << 4) & 0x80); op->ptr = k; // load value from @k __generic_ld_st (op, "ram", 0, 0, 0, k, 0); ESIL_A ("r%d,=,", d); } #endif INST_HANDLER (lpm) { // LPM // LPM Rd, Z // LPM Rd, Z+ ut16 ins = (((ut16) buf[1]) << 8) | ((ut16) buf[0]); // read program memory __generic_ld_st ( op, "prog", 'z', // index register Y/Z 1, // use RAMP* registers (ins & 0xfe0f) == 0x9005 ? 1 // post incremented : 0, // no increment 0, // not offset 0); // load operation (!st) // load register ESIL_A ("r%d,=,", (ins == 0x95c8) ? 
0 // LPM (r0) : ((buf[0] >> 4) & 0xf) // LPM Rd | ((buf[1] & 0x1) << 4)); } INST_HANDLER (lsr) { // LSR Rd int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); ESIL_A ("1,r%d,>>,", d); // 0: R=(Rd >> 1) ESIL_A ("r%d,0x1,&,!,!,cf,=,", d); // C = Rd0 ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("0,nf,=,"); // N ESIL_A ("nf,cf,^,vf,=,"); // V ESIL_A ("nf,vf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = R } INST_HANDLER (mov) { // MOV Rd, Rr int d = ((buf[1] << 4) & 0x10) | ((buf[0] >> 4) & 0x0f); int r = ((buf[1] << 3) & 0x10) | (buf[0] & 0x0f); ESIL_A ("r%d,r%d,=,", r, d); } INST_HANDLER (movw) { // MOVW Rd+1:Rd, Rr+1:Rr int d = (buf[0] & 0xf0) >> 3; int r = (buf[0] & 0x0f) << 1; ESIL_A ("r%d,r%d,=,r%d,r%d,=,", r, d, r + 1, d + 1); } INST_HANDLER (mul) { // MUL Rd, Rr int d = ((buf[1] << 4) & 0x10) | ((buf[0] >> 4) & 0x0f); int r = ((buf[1] << 3) & 0x10) | (buf[0] & 0x0f); ESIL_A ("r%d,r%d,*,", r, d); // 0: (Rd*Rr)<<1 ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/15 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (muls) { // MULS Rd, Rr int d = (buf[0] >> 4 & 0x0f) + 16; int r = (buf[0] & 0x0f) + 16; ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", r); // sign extension Rr ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", d); // sign extension Rd ESIL_A ("*,"); // 0: (Rd*Rr) ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/15 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (mulsu) { // MULSU Rd, Rr int d = (buf[0] >> 4 & 0x07) + 16; int r = (buf[0] & 0x07) + 16; ESIL_A ("r%d,", r); // unsigned Rr ESIL_A ("r%d,DUP,0x80,&,?{,0xffff00,|,},", d); // sign extension Rd ESIL_A ("*,"); // 0: (Rd*Rr) ESIL_A ("DUP,0xff,&,r0,=,"); // r0 = LO(0) ESIL_A ("8,0,RPICK,>>,0xff,&,r1,=,"); // r1 = HI(0) ESIL_A ("DUP,0x8000,&,!,!,cf,=,"); // C = R/15 ESIL_A ("DUP,!,zf,=,"); // Z = !R } INST_HANDLER (neg) { // NEG Rd int d 
= ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); ESIL_A ("r%d,0x00,-,0xff,&,", d); // 0: (0-Rd) ESIL_A ("DUP,r%d,0xff,^,|,0x08,&,!,!,hf,=,", d); // H ESIL_A ("DUP,0x80,-,!,vf,=,", d); // V ESIL_A ("DUP,0x80,&,!,!,nf,=,"); // N ESIL_A ("DUP,!,zf,=,"); // Z ESIL_A ("DUP,!,!,cf,=,"); // C ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (nop) { // NOP ESIL_A (",,"); } INST_HANDLER (or) { // OR Rd, Rr int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,r%d,|,", r, d); // 0: (Rd | Rr) ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,vf,=,"); // V ESIL_A ("nf,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (ori) { // ORI Rd, K // SBR Rd, K int d = ((buf[0] >> 4) & 0xf) + 16; int k = (buf[0] & 0xf) | ((buf[1] & 0xf) << 4); op->val = k; ESIL_A ("r%d,%d,|,", d, k); // 0: (Rd | k) ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("0,vf,=,"); // V ESIL_A ("nf,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (out) { // OUT A, Rr int r = ((buf[0] >> 4) & 0x0f) | ((buf[1] & 0x01) << 4); int a = (buf[0] & 0x0f) | ((buf[1] & 0x6) << 3); RStrBuf *io_dst = __generic_io_dest (a, 1, cpu); op->type2 = 1; op->val = a; op->family = R_ANAL_OP_FAMILY_IO; ESIL_A ("r%d,%s,", r, r_strbuf_get (io_dst)); r_strbuf_free (io_dst); } INST_HANDLER (pop) { // POP Rd int d = ((buf[1] & 0x1) << 4) | ((buf[0] >> 4) & 0xf); __generic_pop (op, 1); ESIL_A ("r%d,=,", d); // store in Rd } INST_HANDLER (push) { // PUSH Rr int r = ((buf[1] & 0x1) << 4) | ((buf[0] >> 4) & 0xf); ESIL_A ("r%d,", r); // load Rr __generic_push (op, 1); // push it into stack // cycles op->cycles = !STR_BEGINS (cpu->model, "ATxmega") ? 1 // AT*mega optimizes one cycle : 2; } INST_HANDLER (rcall) { // RCALL k // target address op->jump = (op->addr + (((((buf[1] & 0xf) << 8) | buf[0]) << 1) | (((buf[1] & 0x8) ? 
~((int) 0x1fff) : 0))) + 2) & CPU_PC_MASK (cpu); op->fail = op->addr + op->size; // esil ESIL_A ("pc,"); // esil already points to next // instruction (@ret) __generic_push (op, CPU_PC_SIZE (cpu)); // push @ret addr ESIL_A ("%"PFMT64d",pc,=,", op->jump); // jump! // cycles if (!strncasecmp (cpu->model, "ATtiny", 6)) { op->cycles = 4; // ATtiny is always slow } else { // PC size decides required runtime! op->cycles = cpu->pc <= 16 ? 3 : 4; if (!STR_BEGINS (cpu->model, "ATxmega")) { op->cycles--; // ATxmega optimizes one cycle } } } INST_HANDLER (ret) { // RET op->eob = true; // esil __generic_pop (op, CPU_PC_SIZE (cpu)); ESIL_A ("pc,=,"); // jump! // cycles if (CPU_PC_SIZE (cpu) > 2) { // if we have a bus bigger than 16 bit op->cycles++; // (i.e. a 22-bit bus), add one extra cycle } } INST_HANDLER (reti) { // RETI //XXX: There are not privileged instructions in ATMEL/AVR op->family = R_ANAL_OP_FAMILY_PRIV; // first perform a standard 'ret' INST_CALL (ret); // RETI: The I-bit is cleared by hardware after an interrupt // has occurred, and is set by the RETI instruction to enable // subsequent interrupts ESIL_A ("1,if,=,"); } INST_HANDLER (rjmp) { // RJMP k op->jump = (op->addr #ifdef _MSC_VER #pragma message ("anal_avr.c: WARNING: Probably broken on windows") + ((((( buf[1] & 0xf) << 9) | (buf[0] << 1))) | (buf[1] & 0x8 ? ~(0x1fff) : 0)) #else + ((((( (typeof (op->jump)) buf[1] & 0xf) << 9) | ((typeof (op->jump)) buf[0] << 1))) | (buf[1] & 0x8 ? 
~((typeof (op->jump)) 0x1fff) : 0)) #endif + 2) & CPU_PC_MASK (cpu); ESIL_A ("%"PFMT64d",pc,=,", op->jump); } INST_HANDLER (ror) { // ROR Rd int d = ((buf[0] >> 4) & 0x0f) | ((buf[1] << 4) & 0x10); ESIL_A ("1,r%d,>>,7,cf,<<,|,", d); // 0: (Rd>>1) | (cf<<7) ESIL_A ("r%d,1,&,cf,=,", d); // C ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("0,RPICK,0x80,&,!,!,nf,=,"); // N ESIL_A ("nf,cf,^,vf,=,"); // V ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d,=,", d); // Rd = result } INST_HANDLER (sbc) { // SBC Rd, Rr int r = (buf[0] & 0x0f) | ((buf[1] & 0x2) << 3); int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x1) << 4); ESIL_A ("cf,r%d,+,r%d,-,", r, d); // 0: (Rd-Rr-C) __generic_sub_update_flags_rr (op, d, r, 1); // FLAGS (carry) ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (sbci) { // SBCI Rd, k int d = ((buf[0] >> 4) & 0xf) + 16; int k = ((buf[1] & 0xf) << 4) | (buf[0] & 0xf); op->val = k; ESIL_A ("cf,%d,+,r%d,-,", k, d); // 0: (Rd-k-C) __generic_sub_update_flags_rk (op, d, k, 1); // FLAGS (carry) ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (sub) { // SUB Rd, Rr int d = ((buf[0] >> 4) & 0xf) | ((buf[1] & 1) << 4); int r = (buf[0] & 0xf) | ((buf[1] & 2) << 3); ESIL_A ("r%d,r%d,-,", r, d); // 0: (Rd-k) __generic_sub_update_flags_rr (op, d, r, 0); // FLAGS (no carry) ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (subi) { // SUBI Rd, k int d = ((buf[0] >> 4) & 0xf) + 16; int k = ((buf[1] & 0xf) << 4) | (buf[0] & 0xf); op->val = k; ESIL_A ("%d,r%d,-,", k, d); // 0: (Rd-k) __generic_sub_update_flags_rk (op, d, k, 1); // FLAGS (no carry) ESIL_A ("r%d,=,", d); // Rd = Result } INST_HANDLER (sbi) { // SBI A, b int a = (buf[0] >> 3) & 0x1f; int b = buf[0] & 0x07; RStrBuf *io_port; op->type2 = 1; op->val = a; op->family = R_ANAL_OP_FAMILY_IO; // read port a and clear bit b io_port = __generic_io_dest (a, 0, cpu); ESIL_A ("0xff,%d,1,<<,|,%s,&,", b, io_port); r_strbuf_free (io_port); // write result to port a io_port = __generic_io_dest (a, 1, cpu); ESIL_A ("%s,", r_strbuf_get 
(io_port)); r_strbuf_free (io_port); } INST_HANDLER (sbix) { // SBIC A, b // SBIS A, b int a = (buf[0] >> 3) & 0x1f; int b = buf[0] & 0x07; RAnalOp next_op; RStrBuf *io_port; op->type2 = 0; op->val = a; op->family = R_ANAL_OP_FAMILY_IO; // calculate next instruction size (call recursively avr_op_analyze) // and free next_op's esil string (we dont need it now) avr_op_analyze (anal, &next_op, op->addr + op->size, buf + op->size, len - op->size, cpu); r_strbuf_fini (&next_op.esil); op->jump = op->addr + next_op.size + 2; // cycles op->cycles = 1; // XXX: This is a bug, because depends on eval state, // so it cannot be really be known until this // instruction is executed by the ESIL interpreter!!! // In case of evaluating to false, this instruction // needs 2/3 cycles, elsewhere it needs only 1 cycle. // read port a and clear bit b io_port = __generic_io_dest (a, 0, cpu); ESIL_A ("%d,1,<<,%s,&,", b, io_port); // IO(A,b) ESIL_A ((buf[1] & 0xe) == 0xc ? "!," // SBIC => branch if 0 : "!,!,"); // SBIS => branch if 1 ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump); // ?true => jmp r_strbuf_free (io_port); } INST_HANDLER (sbiw) { // SBIW Rd+1:Rd, K int d = ((buf[0] & 0x30) >> 3) + 24; int k = (buf[0] & 0xf) | ((buf[0] >> 2) & 0x30); op->val = k; ESIL_A ("%d,r%d:r%d,-,", k, d + 1, d); // 0(Rd+1:Rd - Rr) ESIL_A ("r%d,0x80,&,!,!," // V "0,RPICK,0x8000,&,!," "&,vf,=,", d + 1); ESIL_A ("0,RPICK,0x8000,&,!,!,nf,=,"); // N ESIL_A ("0,RPICK,!,zf,=,"); // Z ESIL_A ("r%d,0x80,&,!," // C "0,RPICK,0x8000,&,!,!," "&,cf,=,", d + 1); ESIL_A ("vf,nf,^,sf,=,"); // S ESIL_A ("r%d:r%d,=,", d + 1, d); // Rd = result } INST_HANDLER (sbrx) { // SBRC Rr, b // SBRS Rr, b int b = buf[0] & 0x7; int r = ((buf[0] >> 4) & 0xf) | ((buf[1] & 0x01) << 4); RAnalOp next_op = {0}; // calculate next instruction size (call recursively avr_op_analyze) // and free next_op's esil string (we dont need it now) avr_op_analyze (anal, &next_op, op->addr + op->size, buf + op->size, len - op->size, cpu); r_strbuf_fini 
(&next_op.esil); op->jump = op->addr + next_op.size + 2; // cycles op->cycles = 1; // XXX: This is a bug, because depends on eval state, // so it cannot be really be known until this // instruction is executed by the ESIL interpreter!!! // In case of evaluating to false, this instruction // needs 2/3 cycles, elsewhere it needs only 1 cycle. ESIL_A ("%d,1,<<,r%d,&,", b, r); // Rr(b) ESIL_A ((buf[1] & 0xe) == 0xc ? "!," // SBRC => branch if cleared : "!,!,"); // SBRS => branch if set ESIL_A ("?{,%"PFMT64d",pc,=,},", op->jump); // ?true => jmp } INST_HANDLER (sleep) { // SLEEP ESIL_A ("BREAK"); } INST_HANDLER (spm) { // SPM Z+ ut64 spmcsr; // read SPM Control Register (SPMCR) r_anal_esil_reg_read (anal->esil, "spmcsr", &spmcsr, NULL); // clear SPMCSR ESIL_A ("0x7c,spmcsr,&=,"); // decide action depending on the old value of SPMCSR switch (spmcsr & 0x7f) { case 0x03: // PAGE ERASE // invoke SPM_CLEAR_PAGE (erases target page writing // the 0xff value ESIL_A ("16,rampz,<<,z,+,"); // push target address ESIL_A ("SPM_PAGE_ERASE,"); // do magic break; case 0x01: // FILL TEMPORARY BUFFER ESIL_A ("r1,r0,"); // push data ESIL_A ("z,"); // push target address ESIL_A ("SPM_PAGE_FILL,"); // do magic break; case 0x05: // WRITE PAGE ESIL_A ("16,rampz,<<,z,+,"); // push target address ESIL_A ("SPM_PAGE_WRITE,"); // do magic break; default: eprintf ("SPM: I dont know what to do with SPMCSR %02x.\n", (unsigned int) spmcsr); } op->cycles = 1; // This is truly false. Datasheets do not publish how // many cycles this instruction uses in all its // operation modes and I am pretty sure that this value // can vary substantially from one MCU type to another. // So... one cycle is fine. } INST_HANDLER (st) { // ST X, Rr // ST X+, Rr // ST -X, Rr // load register ESIL_A ("r%d,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf)); // write in memory __generic_ld_st ( op, "ram", 'x', // use index register X 0, // no use RAMP* registers (buf[0] & 0xf) == 0xe ? 
-1 // pre decremented : (buf[0] & 0xf) == 0xd ? 1 // post increment : 0, // no increment 0, // offset always 0 1); // store operation (st) // // cycles // op->cycles = buf[0] & 0x3 == 0 // ? 2 // LD Rd, X // : buf[0] & 0x3 == 1 // ? 2 // LD Rd, X+ // : 3; // LD Rd, -X // if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) { // // AT*mega optimizes 1 cycle! // op->cycles--; // } } INST_HANDLER (std) { // ST Y, Rr ST Z, Rr // ST Y+, Rr ST Z+, Rr // ST -Y, Rr ST -Z, Rr // ST Y+q, Rr ST Z+q, Rr // load register ESIL_A ("r%d,", ((buf[1] & 1) << 4) | ((buf[0] >> 4) & 0xf)); // write in memory __generic_ld_st ( op, "ram", buf[0] & 0x8 ? 'y' : 'z', // index register Y/Z 0, // no use RAMP* registers !(buf[1] & 0x10) ? 0 // no increment : buf[0] & 0x1 ? 1 // post incremented : -1, // pre decremented !(buf[1] & 0x10) ? (buf[1] & 0x20) // offset | ((buf[1] & 0xc) << 1) | (buf[0] & 0x7) : 0, // no offset 1); // load operation (!st) // // cycles // op->cycles = // buf[1] & 0x1 == 0 // ? !(offset ? 1 : 3) // LDD // : buf[0] & 0x3 == 0 // ? 1 // LD Rd, X // : buf[0] & 0x3 == 1 // ? 2 // LD Rd, X+ // : 3; // LD Rd, -X // if (!STR_BEGINS (cpu->model, "ATxmega") && op->cycles > 1) { // // AT*mega optimizes 1 cycle! 
// op->cycles--; // } } INST_HANDLER (swap) { // SWAP Rd int d = ((buf[1] & 0x1) << 4) | ((buf[0] >> 4) & 0xf); ESIL_A ("4,r%d,>>,0x0f,&,", d); // (Rd >> 4) & 0xf ESIL_A ("4,r%d,<<,0xf0,&,", d); // (Rd >> 4) & 0xf ESIL_A ("|,", d); // S[0] | S[1] ESIL_A ("r%d,=,", d); // Rd = result } OPCODE_DESC opcodes[] = { // op mask select cycles size type INST_DECL (break, 0xffff, 0x9698, 1, 2, TRAP ), // BREAK INST_DECL (eicall, 0xffff, 0x9519, 0, 2, UCALL ), // EICALL INST_DECL (eijmp, 0xffff, 0x9419, 0, 2, UJMP ), // EIJMP INST_DECL (icall, 0xffff, 0x9509, 0, 2, UCALL ), // ICALL INST_DECL (ijmp, 0xffff, 0x9409, 0, 2, UJMP ), // IJMP INST_DECL (lpm, 0xffff, 0x95c8, 3, 2, LOAD ), // LPM INST_DECL (nop, 0xffff, 0x0000, 1, 2, NOP ), // NOP INST_DECL (ret, 0xffff, 0x9508, 4, 2, RET ), // RET INST_DECL (reti, 0xffff, 0x9518, 4, 2, RET ), // RETI INST_DECL (sleep, 0xffff, 0x9588, 1, 2, NOP ), // SLEEP INST_DECL (spm, 0xffff, 0x95e8, 1, 2, TRAP ), // SPM ... INST_DECL (bclr, 0xff8f, 0x9488, 1, 2, SWI ), // BCLR s INST_DECL (bset, 0xff8f, 0x9408, 1, 2, SWI ), // BSET s INST_DECL (fmul, 0xff88, 0x0308, 2, 2, MUL ), // FMUL Rd, Rr INST_DECL (fmuls, 0xff88, 0x0380, 2, 2, MUL ), // FMULS Rd, Rr INST_DECL (fmulsu, 0xff88, 0x0388, 2, 2, MUL ), // FMULSU Rd, Rr INST_DECL (mulsu, 0xff88, 0x0300, 2, 2, AND ), // MUL Rd, Rr INST_DECL (des, 0xff0f, 0x940b, 0, 2, CRYPTO ), // DES k INST_DECL (adiw, 0xff00, 0x9600, 2, 2, ADD ), // ADIW Rd+1:Rd, K INST_DECL (sbiw, 0xff00, 0x9700, 2, 2, SUB ), // SBIW Rd+1:Rd, K INST_DECL (cbi, 0xff00, 0x9800, 1, 2, IO ), // CBI A, K INST_DECL (sbi, 0xff00, 0x9a00, 1, 2, IO ), // SBI A, K INST_DECL (movw, 0xff00, 0x0100, 1, 2, MOV ), // MOVW Rd+1:Rd, Rr+1:Rr INST_DECL (muls, 0xff00, 0x0200, 2, 2, AND ), // MUL Rd, Rr INST_DECL (asr, 0xfe0f, 0x9405, 1, 2, SAR ), // ASR Rd INST_DECL (com, 0xfe0f, 0x9400, 1, 2, SWI ), // BLD Rd, b INST_DECL (dec, 0xfe0f, 0x940a, 1, 2, SUB ), // DEC Rd INST_DECL (elpm, 0xfe0f, 0x9006, 0, 2, LOAD ), // ELPM Rd, Z INST_DECL (elpm, 
0xfe0f, 0x9007, 0, 2, LOAD ), // ELPM Rd, Z+ INST_DECL (inc, 0xfe0f, 0x9403, 1, 2, ADD ), // INC Rd INST_DECL (lac, 0xfe0f, 0x9206, 2, 2, LOAD ), // LAC Z, Rd INST_DECL (las, 0xfe0f, 0x9205, 2, 2, LOAD ), // LAS Z, Rd INST_DECL (lat, 0xfe0f, 0x9207, 2, 2, LOAD ), // LAT Z, Rd INST_DECL (ld, 0xfe0f, 0x900c, 0, 2, LOAD ), // LD Rd, X INST_DECL (ld, 0xfe0f, 0x900d, 0, 2, LOAD ), // LD Rd, X+ INST_DECL (ld, 0xfe0f, 0x900e, 0, 2, LOAD ), // LD Rd, -X INST_DECL (lds, 0xfe0f, 0x9000, 0, 4, LOAD ), // LDS Rd, k INST_DECL (sts, 0xfe0f, 0x9200, 2, 4, STORE ), // STS k, Rr INST_DECL (lpm, 0xfe0f, 0x9004, 3, 2, LOAD ), // LPM Rd, Z INST_DECL (lpm, 0xfe0f, 0x9005, 3, 2, LOAD ), // LPM Rd, Z+ INST_DECL (lsr, 0xfe0f, 0x9406, 1, 2, SHR ), // LSR Rd INST_DECL (neg, 0xfe0f, 0x9401, 2, 2, SUB ), // NEG Rd INST_DECL (pop, 0xfe0f, 0x900f, 2, 2, POP ), // POP Rd INST_DECL (push, 0xfe0f, 0x920f, 0, 2, PUSH ), // PUSH Rr INST_DECL (ror, 0xfe0f, 0x9407, 1, 2, SAR ), // ROR Rd INST_DECL (st, 0xfe0f, 0x920c, 2, 2, STORE ), // ST X, Rr INST_DECL (st, 0xfe0f, 0x920d, 0, 2, STORE ), // ST X+, Rr INST_DECL (st, 0xfe0f, 0x920e, 0, 2, STORE ), // ST -X, Rr INST_DECL (swap, 0xfe0f, 0x9402, 1, 2, SAR ), // SWAP Rd INST_DECL (call, 0xfe0e, 0x940e, 0, 4, CALL ), // CALL k INST_DECL (jmp, 0xfe0e, 0x940c, 2, 4, JMP ), // JMP k INST_DECL (bld, 0xfe08, 0xf800, 1, 2, SWI ), // BLD Rd, b INST_DECL (bst, 0xfe08, 0xfa00, 1, 2, SWI ), // BST Rd, b INST_DECL (sbix, 0xfe08, 0x9900, 2, 2, CJMP ), // SBIC A, b INST_DECL (sbix, 0xfe08, 0x9900, 2, 2, CJMP ), // SBIS A, b INST_DECL (sbrx, 0xfe08, 0xfc00, 2, 2, CJMP ), // SBRC Rr, b INST_DECL (sbrx, 0xfe08, 0xfe00, 2, 2, CJMP ), // SBRS Rr, b INST_DECL (ldd, 0xfe07, 0x9001, 0, 2, LOAD ), // LD Rd, Y/Z+ INST_DECL (ldd, 0xfe07, 0x9002, 0, 2, LOAD ), // LD Rd, -Y/Z INST_DECL (std, 0xfe07, 0x9201, 0, 2, STORE ), // ST Y/Z+, Rr INST_DECL (std, 0xfe07, 0x9202, 0, 2, STORE ), // ST -Y/Z, Rr INST_DECL (adc, 0xfc00, 0x1c00, 1, 2, ADD ), // ADC Rd, Rr INST_DECL (add, 0xfc00, 
0x0c00, 1, 2, ADD ), // ADD Rd, Rr INST_DECL (and, 0xfc00, 0x2000, 1, 2, AND ), // AND Rd, Rr INST_DECL (brbx, 0xfc00, 0xf000, 0, 2, CJMP ), // BRBS s, k INST_DECL (brbx, 0xfc00, 0xf400, 0, 2, CJMP ), // BRBC s, k INST_DECL (cp, 0xfc00, 0x1400, 1, 2, CMP ), // CP Rd, Rr INST_DECL (cpc, 0xfc00, 0x0400, 1, 2, CMP ), // CPC Rd, Rr INST_DECL (cpse, 0xfc00, 0x1000, 0, 2, CJMP ), // CPSE Rd, Rr INST_DECL (eor, 0xfc00, 0x2400, 1, 2, XOR ), // EOR Rd, Rr INST_DECL (mov, 0xfc00, 0x2c00, 1, 2, MOV ), // MOV Rd, Rr INST_DECL (mul, 0xfc00, 0x9c00, 2, 2, AND ), // MUL Rd, Rr INST_DECL (or, 0xfc00, 0x2800, 1, 2, OR ), // OR Rd, Rr INST_DECL (sbc, 0xfc00, 0x0800, 1, 2, SUB ), // SBC Rd, Rr INST_DECL (sub, 0xfc00, 0x1800, 1, 2, SUB ), // SUB Rd, Rr INST_DECL (in, 0xf800, 0xb000, 1, 2, IO ), // IN Rd, A //INST_DECL (lds16, 0xf800, 0xa000, 1, 2, LOAD ), // LDS Rd, k INST_DECL (out, 0xf800, 0xb800, 1, 2, IO ), // OUT A, Rr INST_DECL (andi, 0xf000, 0x7000, 1, 2, AND ), // ANDI Rd, K INST_DECL (cpi, 0xf000, 0x3000, 1, 2, CMP ), // CPI Rd, K INST_DECL (ldi, 0xf000, 0xe000, 1, 2, LOAD ), // LDI Rd, K INST_DECL (ori, 0xf000, 0x6000, 1, 2, OR ), // ORI Rd, K INST_DECL (rcall, 0xf000, 0xd000, 0, 2, CALL ), // RCALL k INST_DECL (rjmp, 0xf000, 0xc000, 2, 2, JMP ), // RJMP k INST_DECL (sbci, 0xf000, 0x4000, 1, 2, SUB ), // SBC Rd, Rr INST_DECL (subi, 0xf000, 0x5000, 1, 2, SUB ), // SUBI Rd, Rr INST_DECL (ldd, 0xd200, 0x8000, 0, 2, LOAD ), // LD Rd, Y/Z+q INST_DECL (std, 0xd200, 0x8200, 0, 2, STORE ), // ST Y/Z+q, Rr INST_LAST }; static OPCODE_DESC* avr_op_analyze(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *buf, int len, CPU_MODEL *cpu) { OPCODE_DESC *opcode_desc; if (len < 2) { return NULL; } ut16 ins = (buf[1] << 8) | buf[0]; int fail; char *t; // initialize op struct memset (op, 0, sizeof (RAnalOp)); op->ptr = UT64_MAX; op->val = UT64_MAX; op->jump = UT64_MAX; r_strbuf_init (&op->esil); // process opcode for (opcode_desc = opcodes; opcode_desc->handler; opcode_desc++) { if ((ins & 
opcode_desc->mask) == opcode_desc->selector) { fail = 0; // copy default cycles/size values op->cycles = opcode_desc->cycles; op->size = opcode_desc->size; op->type = opcode_desc->type; op->jump = UT64_MAX; op->fail = UT64_MAX; // op->fail = addr + op->size; op->addr = addr; // start void esil expression r_strbuf_setf (&op->esil, ""); // handle opcode opcode_desc->handler (anal, op, buf, len, &fail, cpu); if (fail) { goto INVALID_OP; } if (op->cycles <= 0) { // eprintf ("opcode %s @%"PFMT64x" returned 0 cycles.\n", opcode_desc->name, op->addr); opcode_desc->cycles = 2; } op->nopcode = (op->type == R_ANAL_OP_TYPE_UNK); // remove trailing coma (COMETE LA COMA) t = r_strbuf_get (&op->esil); if (t && strlen (t) > 1) { t += strlen (t) - 1; if (*t == ',') { *t = '\0'; } } return opcode_desc; } } // ignore reserved opcodes (if they have not been caught by the previous loop) if ((ins & 0xff00) == 0xff00 && (ins & 0xf) > 7) { goto INVALID_OP; } INVALID_OP: // An unknown or invalid option has appeared. // -- Throw pokeball! 
op->family = R_ANAL_OP_FAMILY_UNKNOWN; op->type = R_ANAL_OP_TYPE_UNK; op->addr = addr; op->fail = UT64_MAX; op->jump = UT64_MAX; op->ptr = UT64_MAX; op->val = UT64_MAX; op->nopcode = 1; op->cycles = 1; op->size = 2; // launch esil trap (for communicating upper layers about this weird // and stinky situation r_strbuf_set (&op->esil, "1,$"); return NULL; } static int avr_op(RAnal *anal, RAnalOp *op, ut64 addr, const ut8 *buf, int len) { CPU_MODEL *cpu; ut64 offset; // init op if (!op) { return 2; } // select cpu info cpu = get_cpu_model (anal->cpu); // set memory layout registers if (anal->esil) { offset = 0; r_anal_esil_reg_write (anal->esil, "_prog", offset); offset += (1 << cpu->pc); r_anal_esil_reg_write (anal->esil, "_io", offset); offset += const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "sram_start")); r_anal_esil_reg_write (anal->esil, "_sram", offset); offset += const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "sram_size")); r_anal_esil_reg_write (anal->esil, "_eeprom", offset); offset += const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "eeprom_size")); r_anal_esil_reg_write (anal->esil, "_page", offset); } // process opcode avr_op_analyze (anal, op, addr, buf, len, cpu); return op->size; } static int avr_custom_des (RAnalEsil *esil) { ut64 key, encrypt, text,des_round; ut32 key_lo, key_hi, buf_lo, buf_hi; if (!esil || !esil->anal || !esil->anal->reg) { return false; } if (!__esil_pop_argument (esil, &des_round)) { return false; } r_anal_esil_reg_read (esil, "hf", &encrypt, NULL); r_anal_esil_reg_read (esil, "deskey", &key, NULL); r_anal_esil_reg_read (esil, "text", &text, NULL); key_lo = key & UT32_MAX; key_hi = key >> 32; buf_lo = text & UT32_MAX; buf_hi = text >> 32; if (des_round != desctx.round) { desctx.round = des_round; } if (!desctx.round) { int i; //generating all round keys r_des_permute_key (&key_lo, &key_hi); for (i = 0; i < 16; i++) { r_des_round_key (i, &desctx.round_key_lo[i], &desctx.round_key_hi[i], &key_lo, &key_hi); } 
r_des_permute_block0 (&buf_lo, &buf_hi); } if (encrypt) { r_des_round (&buf_lo, &buf_hi, &desctx.round_key_lo[desctx.round], &desctx.round_key_hi[desctx.round]); } else { r_des_round (&buf_lo, &buf_hi, &desctx.round_key_lo[15 - desctx.round], &desctx.round_key_hi[15 - desctx.round]); } if (desctx.round == 15) { r_des_permute_block1 (&buf_hi, &buf_lo); desctx.round = 0; } else { desctx.round++; } r_anal_esil_reg_write (esil, "text", text); return true; } // ESIL operation SPM_PAGE_ERASE static int avr_custom_spm_page_erase(RAnalEsil *esil) { CPU_MODEL *cpu; ut8 c; ut64 addr, page_size_bits, i; // sanity check if (!esil || !esil->anal || !esil->anal->reg) { return false; } // get target address if (!__esil_pop_argument(esil, &addr)) { return false; } // get details about current MCU and fix input address cpu = get_cpu_model (esil->anal->cpu); page_size_bits = const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "page_size")); // align base address to page_size_bits addr &= ~(MASK (page_size_bits)); // perform erase //eprintf ("SPM_PAGE_ERASE %ld bytes @ 0x%08" PFMT64x ".\n", page_size, addr); c = 0xff; for (i = 0; i < (1ULL << page_size_bits); i++) { r_anal_esil_mem_write ( esil, (addr + i) & CPU_PC_MASK (cpu), &c, 1); } return true; } // ESIL operation SPM_PAGE_FILL static int avr_custom_spm_page_fill(RAnalEsil *esil) { CPU_MODEL *cpu; ut64 addr, page_size_bits, i; ut8 r0, r1; // sanity check if (!esil || !esil->anal || !esil->anal->reg) { return false; } // get target address, r0, r1 if (!__esil_pop_argument(esil, &addr)) { return false; } if (!__esil_pop_argument (esil, &i)) { return false; } r0 = i; if (!__esil_pop_argument (esil, &i)) { return false; } r1 = i; // get details about current MCU and fix input address cpu = get_cpu_model (esil->anal->cpu); page_size_bits = const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "page_size")); // align and crop base address addr &= (MASK (page_size_bits) ^ 1); // perform write to temporary page //eprintf 
("SPM_PAGE_FILL bytes (%02x, %02x) @ 0x%08" PFMT64x ".\n", r1, r0, addr); r_anal_esil_mem_write (esil, addr++, &r0, 1); r_anal_esil_mem_write (esil, addr++, &r1, 1); return true; } // ESIL operation SPM_PAGE_WRITE static int avr_custom_spm_page_write(RAnalEsil *esil) { CPU_MODEL *cpu; char *t = NULL; ut64 addr, page_size_bits, tmp_page; // sanity check if (!esil || !esil->anal || !esil->anal->reg) { return false; } // get target address if (!__esil_pop_argument (esil, &addr)) { return false; } // get details about current MCU and fix input address and base address // of the internal temporary page cpu = get_cpu_model (esil->anal->cpu); page_size_bits = const_get_value (const_by_name (cpu, CPU_CONST_PARAM, "page_size")); r_anal_esil_reg_read (esil, "_page", &tmp_page, NULL); // align base address to page_size_bits addr &= (~(MASK (page_size_bits)) & CPU_PC_MASK (cpu)); // perform writing //eprintf ("SPM_PAGE_WRITE %ld bytes @ 0x%08" PFMT64x ".\n", page_size, addr); if (!(t = malloc (1 << page_size_bits))) { eprintf ("Cannot alloc a buffer for copying the temporary page.\n"); return false; } r_anal_esil_mem_read (esil, tmp_page, (ut8 *) t, 1 << page_size_bits); r_anal_esil_mem_write (esil, addr, (ut8 *) t, 1 << page_size_bits); return true; } static int esil_avr_hook_reg_write(RAnalEsil *esil, const char *name, ut64 *val) { CPU_MODEL *cpu; if (!esil || !esil->anal) { return 0; } // select cpu info cpu = get_cpu_model (esil->anal->cpu); // crop registers and force certain values if (!strcmp (name, "pc")) { *val &= CPU_PC_MASK (cpu); } else if (!strcmp (name, "pcl")) { if (cpu->pc < 8) { *val &= MASK (8); } } else if (!strcmp (name, "pch")) { *val = cpu->pc > 8 ? 
*val & MASK (cpu->pc - 8) : 0; } return 0; } static int esil_avr_init(RAnalEsil *esil) { if (!esil) { return false; } desctx.round = 0; r_anal_esil_set_op (esil, "des", avr_custom_des); r_anal_esil_set_op (esil, "SPM_PAGE_ERASE", avr_custom_spm_page_erase); r_anal_esil_set_op (esil, "SPM_PAGE_FILL", avr_custom_spm_page_fill); r_anal_esil_set_op (esil, "SPM_PAGE_WRITE", avr_custom_spm_page_write); esil->cb.hook_reg_write = esil_avr_hook_reg_write; return true; } static int esil_avr_fini(RAnalEsil *esil) { return true; } static int set_reg_profile(RAnal *anal) { const char *p = "=PC pcl\n" "=SP sp\n" // explained in http://www.nongnu.org/avr-libc/user-manual/FAQ.html // and http://www.avrfreaks.net/forum/function-calling-convention-gcc-generated-assembly-file "=A0 r25\n" "=A1 r24\n" "=A2 r23\n" "=A3 r22\n" "=R0 r24\n" #if 0 PC: 16- or 22-bit program counter SP: 8- or 16-bit stack pointer SREG: 8-bit status register RAMPX, RAMPY, RAMPZ, RAMPD and EIND: #endif // 8bit registers x 32 "gpr r0 .8 0 0\n" "gpr r1 .8 1 0\n" "gpr r2 .8 2 0\n" "gpr r3 .8 3 0\n" "gpr r4 .8 4 0\n" "gpr r5 .8 5 0\n" "gpr r6 .8 6 0\n" "gpr r7 .8 7 0\n" "gpr text .64 0 0\n" "gpr r8 .8 8 0\n" "gpr r9 .8 9 0\n" "gpr r10 .8 10 0\n" "gpr r11 .8 11 0\n" "gpr r12 .8 12 0\n" "gpr r13 .8 13 0\n" "gpr r14 .8 14 0\n" "gpr r15 .8 15 0\n" "gpr deskey .64 8 0\n" "gpr r16 .8 16 0\n" "gpr r17 .8 17 0\n" "gpr r18 .8 18 0\n" "gpr r19 .8 19 0\n" "gpr r20 .8 20 0\n" "gpr r21 .8 21 0\n" "gpr r22 .8 22 0\n" "gpr r23 .8 23 0\n" "gpr r24 .8 24 0\n" "gpr r25 .8 25 0\n" "gpr r26 .8 26 0\n" "gpr r27 .8 27 0\n" "gpr r28 .8 28 0\n" "gpr r29 .8 29 0\n" "gpr r30 .8 30 0\n" "gpr r31 .8 31 0\n" // 16 bit overlapped registers for 16 bit math "gpr r17:r16 .16 16 0\n" "gpr r19:r18 .16 18 0\n" "gpr r21:r20 .16 20 0\n" "gpr r23:r22 .16 22 0\n" "gpr r25:r24 .16 24 0\n" "gpr r27:r26 .16 26 0\n" "gpr r29:r28 .16 28 0\n" "gpr r31:r30 .16 30 0\n" // 16 bit overlapped registers for memory addressing "gpr x .16 26 0\n" "gpr y .16 28 0\n" 
"gpr z .16 30 0\n" // program counter // NOTE: program counter size in AVR depends on the CPU model. It seems that // the PC may range from 16 bits to 22 bits. "gpr pc .32 32 0\n" "gpr pcl .16 32 0\n" "gpr pch .16 34 0\n" // special purpose registers "gpr sp .16 36 0\n" "gpr spl .8 36 0\n" "gpr sph .8 37 0\n" // status bit register (SREG) "gpr sreg .8 38 0\n" "gpr cf .1 38.0 0\n" // Carry. This is a borrow flag on subtracts. "gpr zf .1 38.1 0\n" // Zero. Set to 1 when an arithmetic result is zero. "gpr nf .1 38.2 0\n" // Negative. Set to a copy of the most significant bit of an arithmetic result. "gpr vf .1 38.3 0\n" // Overflow flag. Set in case of two's complement overflow. "gpr sf .1 38.4 0\n" // Sign flag. Unique to AVR, this is always (N ^ V) (xor), and shows the true sign of a comparison. "gpr hf .1 38.5 0\n" // Half carry. This is an internal carry from additions and is used to support BCD arithmetic. "gpr tf .1 38.6 0\n" // Bit copy. Special bit load and bit store instructions use this bit. "gpr if .1 38.7 0\n" // Interrupt flag. Set when interrupts are enabled. // 8bit segment registers to be added to X, Y, Z to get 24bit offsets "gpr rampx .8 39 0\n" "gpr rampy .8 40 0\n" "gpr rampz .8 41 0\n" "gpr rampd .8 42 0\n" "gpr eind .8 43 0\n" // memory mapping emulator registers // _prog // the program flash. It has its own address space. // _ram // _io // start of the data addres space. It is the same address of IO, // because IO is the first memory space addressable in the AVR. // _sram // start of the SRAM (this offset depends on IO size, and it is // inside the _ram address space) // _eeprom // this is another address space, outside ram and flash // _page // this is the temporary page used by the SPM instruction. This // memory is not directly addressable and it is used internally by // the CPU when autoflashing. 
"gpr _prog .32 44 0\n" "gpr _page .32 48 0\n" "gpr _eeprom .32 52 0\n" "gpr _ram .32 56 0\n" "gpr _io .32 56 0\n" "gpr _sram .32 60 0\n" // other important MCU registers // spmcsr/spmcr // Store Program Memory Control and Status Register (SPMCSR) "gpr spmcsr .8 64 0\n" ; return r_reg_set_profile_string (anal->reg, p); } static int archinfo(RAnal *anal, int q) { if (q == R_ANAL_ARCHINFO_ALIGN) return 2; if (q == R_ANAL_ARCHINFO_MAX_OP_SIZE) return 4; if (q == R_ANAL_ARCHINFO_MIN_OP_SIZE) return 2; return 2; // XXX } static ut8 *anal_mask_avr(RAnal *anal, int size, const ut8 *data, ut64 at) { RAnalOp *op = NULL; ut8 *ret = NULL; int idx; if (!(op = r_anal_op_new ())) { return NULL; } if (!(ret = malloc (size))) { r_anal_op_free (op); return NULL; } memset (ret, 0xff, size); CPU_MODEL *cpu = get_cpu_model (anal->cpu); for (idx = 0; idx + 1 < size; idx += op->size) { OPCODE_DESC* opcode_desc = avr_op_analyze (anal, op, at + idx, data + idx, size - idx, cpu); if (op->size < 1) { break; } if (!opcode_desc) { // invalid instruction continue; } // the additional data for "long" opcodes (4 bytes) is usually something we want to ignore for matching // (things like memory offsets or jump addresses) if (op->size == 4) { ret[idx + 2] = 0; ret[idx + 3] = 0; } if (op->ptr != UT64_MAX || op->jump != UT64_MAX) { ret[idx] = opcode_desc->mask; ret[idx + 1] = opcode_desc->mask >> 8; } } r_anal_op_free (op); return ret; } RAnalPlugin r_anal_plugin_avr = { .name = "avr", .desc = "AVR code analysis plugin", .license = "LGPL3", .arch = "avr", .esil = true, .archinfo = archinfo, .bits = 8 | 16, // 24 big regs conflicts .op = &avr_op, .set_reg_profile = &set_reg_profile, .esil_init = esil_avr_init, .esil_fini = esil_avr_fini, .anal_mask = anal_mask_avr, }; #ifndef CORELIB RLibStruct radare_plugin = { .type = R_LIB_TYPE_ANAL, .data = &r_anal_plugin_avr, .version = R2_VERSION }; #endif
/* (non-code dataset markers — not part of the source; kept as a comment
 * so they cannot be misread as code)
 * ./CrossVul/dataset_final_sorted/CWE-416/c/good_148_0
 * crossvul-cpp_data_bad_1314_0
 */
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 */ #include <linux/fs.h> #include <linux/time.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/dax.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/bio.h> #include <linux/workqueue.h> #include <linux/kernel.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/iomap.h> #include <linux/iversion.h> #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "truncate.h" #include <trace/events/ext4.h> #define MPAGE_DA_EXTENT_TAIL 0x01 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __u16 dummy_csum = 0; int offset = offsetof(struct ext4_inode, i_checksum_lo); unsigned int csum_size = sizeof(dummy_csum); csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset); csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size); offset += csum_size; csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, EXT4_GOOD_OLD_INODE_SIZE - offset); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { offset = offsetof(struct ext4_inode, i_checksum_hi); csum = ext4_chksum(sbi, csum, (__u8 *)raw + EXT4_GOOD_OLD_INODE_SIZE, offset - EXT4_GOOD_OLD_INODE_SIZE); if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { csum = ext4_chksum(sbi, csum, (__u8 
*)&dummy_csum, csum_size); offset += csum_size; } csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset, EXT4_INODE_SIZE(inode->i_sb) - offset); } return csum; } static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { __u32 provided, calculated; if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || !ext4_has_metadata_csum(inode->i_sb)) return 1; provided = le16_to_cpu(raw->i_checksum_lo); calculated = ext4_inode_csum(inode, raw, ei); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16; else calculated &= 0xFFFF; return provided == calculated; } static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { __u32 csum; if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || !ext4_has_metadata_csum(inode->i_sb)) return; csum = ext4_inode_csum(inode, raw, ei); raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) raw->i_checksum_hi = cpu_to_le16(csum >> 16); } static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { trace_ext4_begin_ordered_truncate(inode, new_size); /* * If jinode is zero, then we never opened the file for * writing, so there's no need to call * jbd2_journal_begin_ordered_truncate() since there's no * outstanding writes we need to flush. 
*/ if (!EXT4_I(inode)->jinode) return 0; return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode, new_size); } static void ext4_invalidatepage(struct page *page, unsigned int offset, unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents); /* * Test whether an inode is a fast symlink. * A fast symlink has its symlink data stored in ext4_inode_info->i_data. */ int ext4_inode_is_fast_symlink(struct inode *inode) { if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { int ea_blocks = EXT4_I(inode)->i_file_acl ? EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; if (ext4_has_inline_data(inode)) return 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } return S_ISLNK(inode->i_mode) && inode->i_size && (inode->i_size < EXT4_N_BLOCKS * 4); } /* * Called at the last iput() if i_nlink is zero. */ void ext4_evict_inode(struct inode *inode) { handle_t *handle; int err; /* * Credits for final inode cleanup and freeing: * sb + inode (ext4_orphan_del()), block bitmap, group descriptor * (xattr block freeing), bitmap, group descriptor (inode freeing) */ int extra_credits = 6; struct ext4_xattr_inode_array *ea_inode_array = NULL; trace_ext4_evict_inode(inode); if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the * journal. So although mm thinks everything is clean and * ready for reaping the inode might still have some pages to * write in the running transaction or waiting to be * checkpointed. Thus calling jbd2_journal_invalidatepage() * (via truncate_inode_pages()) to discard these buffers can * cause data loss. 
Also even if we did not discard these * buffers, we would have no way to find them after the inode * is reaped and thus user could see stale data if he tries to * read them before the transaction is checkpointed. So be * careful and force everything to disk here... We use * ei->i_datasync_tid to store the newest transaction * containing inode's data. * * Note that directories do not have this problem because they * don't use page cache. */ if (inode->i_ino != EXT4_JOURNAL_INO && ext4_should_journal_data(inode) && (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && inode->i_data.nrpages) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; jbd2_complete_transaction(journal, commit_tid); filemap_write_and_wait(&inode->i_data); } truncate_inode_pages_final(&inode->i_data); goto no_delete; } if (is_bad_inode(inode)) goto no_delete; dquot_initialize(inode); if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages_final(&inode->i_data); /* * Protect us against freezing - iput() caller didn't have to have any * protection against it */ sb_start_intwrite(inode->i_sb); if (!IS_NOQUOTA(inode)) extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb); /* * Block bitmap, group descriptor, and inode are accounted in both * ext4_blocks_for_truncate() and extra_credits. So subtract 3. */ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, ext4_blocks_for_truncate(inode) + extra_credits - 3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); /* * If we're going to skip the normal cleanup, we still need to * make sure that the in-core orphan linked list is properly * cleaned up. */ ext4_orphan_del(NULL, inode); sb_end_intwrite(inode->i_sb); goto no_delete; } if (IS_SYNC(inode)) ext4_handle_sync(handle); /* * Set inode->i_size to 0 before calling ext4_truncate(). 
We need * special handling of symlinks here because i_size is used to * determine whether ext4_inode_info->i_data contains symlink data or * block mappings. Setting i_size to 0 will remove its fast symlink * status. Erase i_data so that it becomes a valid empty block map. */ if (ext4_inode_is_fast_symlink(inode)) memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data)); inode->i_size = 0; err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_warning(inode->i_sb, "couldn't mark inode dirty (err %d)", err); goto stop_handle; } if (inode->i_blocks) { err = ext4_truncate(inode); if (err) { ext4_error(inode->i_sb, "couldn't truncate inode %lu (err %d)", inode->i_ino, err); goto stop_handle; } } /* Remove xattr references. */ err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array, extra_credits); if (err) { ext4_warning(inode->i_sb, "xattr delete (err %d)", err); stop_handle: ext4_journal_stop(handle); ext4_orphan_del(NULL, inode); sb_end_intwrite(inode->i_sb); ext4_xattr_inode_array_free(ea_inode_array); goto no_delete; } /* * Kill off the orphan record which ext4_truncate created. * AKPM: I think this can be inside the above `if'. * Note that ext4_orphan_del() has to be able to cope with the * deletion of a non-existent orphan - this is because we don't * know if ext4_truncate() actually created an orphan record. * (Well, we could do this if we need to, but heck - it works) */ ext4_orphan_del(handle, inode); EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds(); /* * One subtle ordering requirement: if anything has gone wrong * (transaction abort, IO errors, whatever), then we can still * do these next steps (the fs will already have been marked as * having errors), but we can't free the inode if the mark_dirty * fails. */ if (ext4_mark_inode_dirty(handle, inode)) /* If that failed, just do the required in-core inode clear. 
*/ ext4_clear_inode(inode); else ext4_free_inode(handle, inode); ext4_journal_stop(handle); sb_end_intwrite(inode->i_sb); ext4_xattr_inode_array_free(ea_inode_array); return; no_delete: ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ } #ifdef CONFIG_QUOTA qsize_t *ext4_get_reserved_space(struct inode *inode) { return &EXT4_I(inode)->i_reserved_quota; } #endif /* * Called with i_data_sem down, which is important since we can call * ext4_discard_preallocations() from here. */ void ext4_da_update_reserve_space(struct inode *inode, int used, int quota_claim) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); spin_lock(&ei->i_block_reservation_lock); trace_ext4_da_update_reserve_space(inode, used, quota_claim); if (unlikely(used > ei->i_reserved_data_blocks)) { ext4_warning(inode->i_sb, "%s: ino %lu, used %d " "with only %d reserved data blocks", __func__, inode->i_ino, used, ei->i_reserved_data_blocks); WARN_ON(1); used = ei->i_reserved_data_blocks; } /* Update per-inode reservations */ ei->i_reserved_data_blocks -= used; percpu_counter_sub(&sbi->s_dirtyclusters_counter, used); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); /* Update quota subsystem for data blocks */ if (quota_claim) dquot_claim_block(inode, EXT4_C2B(sbi, used)); else { /* * We did fallocate with an offset that is already delayed * allocated. So on delayed allocated writeback we should * not re-claim the quota for fallocated blocks. */ dquot_release_reservation_block(inode, EXT4_C2B(sbi, used)); } /* * If we have done all the pending block allocations and if * there aren't any writers on the inode, we can discard the * inode's preallocations. 
*/ if ((ei->i_reserved_data_blocks == 0) && !inode_is_open_for_write(inode)) ext4_discard_preallocations(inode); } static int __check_block_validity(struct inode *inode, const char *func, unsigned int line, struct ext4_map_blocks *map) { if (ext4_has_feature_journal(inode->i_sb) && (inode->i_ino == le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) return 0; if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, map->m_pblk, map->m_len); return -EFSCORRUPTED; } return 0; } int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, ext4_lblk_t len) { int ret; if (IS_ENCRYPTED(inode)) return fscrypt_zeroout_range(inode, lblk, pblk, len); ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS); if (ret > 0) ret = 0; return ret; } #define check_block_validity(inode, map) \ __check_block_validity((inode), __func__, __LINE__, (map)) #ifdef ES_AGGRESSIVE_TEST static void ext4_map_blocks_es_recheck(handle_t *handle, struct inode *inode, struct ext4_map_blocks *es_map, struct ext4_map_blocks *map, int flags) { int retval; map->m_flags = 0; /* * There is a race window that the result is not the same. * e.g. xfstests #223 when dioread_nolock enables. The reason * is that we lookup a block mapping in extent status tree with * out taking i_data_sem. So at the time the unwritten extent * could be converted. */ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { retval = ext4_ext_map_blocks(handle, inode, map, flags & EXT4_GET_BLOCKS_KEEP_SIZE); } else { retval = ext4_ind_map_blocks(handle, inode, map, flags & EXT4_GET_BLOCKS_KEEP_SIZE); } up_read((&EXT4_I(inode)->i_data_sem)); /* * We don't check m_len because extent will be collpased in status * tree. So the m_len might not equal. 
*/ if (es_map->m_lblk != map->m_lblk || es_map->m_flags != map->m_flags || es_map->m_pblk != map->m_pblk) { printk("ES cache assertion failed for inode: %lu " "es_cached ex [%d/%d/%llu/%x] != " "found ex [%d/%d/%llu/%x] retval %d flags %x\n", inode->i_ino, es_map->m_lblk, es_map->m_len, es_map->m_pblk, es_map->m_flags, map->m_lblk, map->m_len, map->m_pblk, map->m_flags, retval, flags); } } #endif /* ES_AGGRESSIVE_TEST */ /* * The ext4_map_blocks() function tries to look up the requested blocks, * and returns if the blocks are already mapped. * * Otherwise it takes the write lock of the i_data_sem and allocate blocks * and store the allocated blocks in the result buffer head and mark it * mapped. * * If file type is extents based, it will call ext4_ext_map_blocks(), * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping * based files * * On success, it returns the number of blocks being mapped or allocated. if * create==0 and the blocks are pre-allocated and unwritten, the resulting @map * is marked as unwritten. If the create == 1, it will mark @map as mapped. * * It returns 0 if plain look up failed (blocks have not been allocated), in * that case, @map is returned as unmapped but we still do fill map->m_len to * indicate the length of a hole starting at map->m_lblk. * * It returns the error in case of allocation failure. 
*/ int ext4_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { struct extent_status es; int retval; int ret = 0; #ifdef ES_AGGRESSIVE_TEST struct ext4_map_blocks orig_map; memcpy(&orig_map, map, sizeof(*map)); #endif map->m_flags = 0; ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," "logical block %lu\n", inode->i_ino, flags, map->m_len, (unsigned long) map->m_lblk); /* * ext4_map_blocks returns an int, and m_len is an unsigned int */ if (unlikely(map->m_len > INT_MAX)) map->m_len = INT_MAX; /* We can handle the block number less than EXT_MAX_BLOCKS */ if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS)) return -EFSCORRUPTED; /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk; map->m_flags |= ext4_es_is_written(&es) ? EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN; retval = es.es_len - (map->m_lblk - es.es_lblk); if (retval > map->m_len) retval = map->m_len; map->m_len = retval; } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) { map->m_pblk = 0; retval = es.es_len - (map->m_lblk - es.es_lblk); if (retval > map->m_len) retval = map->m_len; map->m_len = retval; retval = 0; } else { BUG(); } #ifdef ES_AGGRESSIVE_TEST ext4_map_blocks_es_recheck(handle, inode, map, &orig_map, flags); #endif goto found; } /* * Try to see if we can get the block without requesting a new * file system block. 
*/ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { retval = ext4_ext_map_blocks(handle, inode, map, flags & EXT4_GET_BLOCKS_KEEP_SIZE); } else { retval = ext4_ind_map_blocks(handle, inode, map, flags & EXT4_GET_BLOCKS_KEEP_SIZE); } if (retval > 0) { unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, "ES len assertion failed for inode " "%lu: retval %d != map->m_len %d", inode->i_ino, retval, map->m_len); WARN_ON(1); } status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && !(status & EXTENT_STATUS_WRITTEN) && ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk, status); if (ret < 0) retval = ret; } up_read((&EXT4_I(inode)->i_data_sem)); found: if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { ret = check_block_validity(inode, map); if (ret != 0) return ret; } /* If it is only a block(s) look up */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) return retval; /* * Returns if the blocks have already allocated * * Note that if blocks have been preallocated * ext4_ext_get_block() returns the create = 0 * with buffer head unmapped. */ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) /* * If we need to convert extent to unwritten * we continue and do the actual work in * ext4_ext_map_blocks() */ if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) return retval; /* * Here we clear m_flags because after allocating an new extent, * it will be set again. */ map->m_flags &= ~EXT4_MAP_FLAGS; /* * New blocks allocate and/or writing to unwritten extent * will possibly result in updating i_data, so we take * the write lock of i_data_sem, and call get_block() * with create == 1 flag. 
*/ down_write(&EXT4_I(inode)->i_data_sem); /* * We need to check for EXT4 here because migrate * could have changed the inode type in between */ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { retval = ext4_ext_map_blocks(handle, inode, map, flags); } else { retval = ext4_ind_map_blocks(handle, inode, map, flags); if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { /* * We allocated new blocks which will result in * i_data's format changing. Force the migrate * to fail by clearing migrate flags */ ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); } /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. We don't * support fallocate for non extent files. So we can update * reserve space here. */ if ((retval > 0) && (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) ext4_da_update_reserve_space(inode, retval, 1); } if (retval > 0) { unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, "ES len assertion failed for inode " "%lu: retval %d != map->m_len %d", inode->i_ino, retval, map->m_len); WARN_ON(1); } /* * We have to zeroout blocks before inserting them into extent * status tree. Otherwise someone could look them up there and * use them before they are really zeroed. We also have to * unmap metadata before zeroing as otherwise writeback can * overwrite zeros with stale data from block device. */ if (flags & EXT4_GET_BLOCKS_ZERO && map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) { ret = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk, map->m_len); if (ret) { retval = ret; goto out_sem; } } /* * If the extent has been zeroed out, we don't need to update * extent status tree. */ if ((flags & EXT4_GET_BLOCKS_PRE_IO) && ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { if (ext4_es_is_written(&es)) goto out_sem; } status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && !(status & EXTENT_STATUS_WRITTEN) && ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk, status); if (ret < 0) { retval = ret; goto out_sem; } } out_sem: up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { ret = check_block_validity(inode, map); if (ret != 0) return ret; /* * Inodes with freshly allocated blocks where contents will be * visible after transaction commit must be on transaction's * ordered data list. */ if (map->m_flags & EXT4_MAP_NEW && !(map->m_flags & EXT4_MAP_UNWRITTEN) && !(flags & EXT4_GET_BLOCKS_ZERO) && !ext4_is_quota_file(inode) && ext4_should_order_data(inode)) { loff_t start_byte = (loff_t)map->m_lblk << inode->i_blkbits; loff_t length = (loff_t)map->m_len << inode->i_blkbits; if (flags & EXT4_GET_BLOCKS_IO_SUBMIT) ret = ext4_jbd2_inode_add_wait(handle, inode, start_byte, length); else ret = ext4_jbd2_inode_add_write(handle, inode, start_byte, length); if (ret) return ret; } } return retval; } /* * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages * we have to be careful as someone else may be manipulating b_state as well. */ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags) { unsigned long old_state; unsigned long new_state; flags &= EXT4_MAP_FLAGS; /* Dummy buffer_head? Set non-atomically. */ if (!bh->b_page) { bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags; return; } /* * Someone else may be modifying b_state. Be careful! This is ugly but * once we get rid of using bh as a container for mapping information * to pass to / from get_block functions, this can go away. 
*/ do { old_state = READ_ONCE(bh->b_state); new_state = (old_state & ~EXT4_MAP_FLAGS) | flags; } while (unlikely( cmpxchg(&bh->b_state, old_state, new_state) != old_state)); } static int _ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int flags) { struct ext4_map_blocks map; int ret = 0; if (ext4_has_inline_data(inode)) return -ERANGE; map.m_lblk = iblock; map.m_len = bh->b_size >> inode->i_blkbits; ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map, flags); if (ret > 0) { map_bh(bh, inode->i_sb, map.m_pblk); ext4_update_bh_state(bh, map.m_flags); bh->b_size = inode->i_sb->s_blocksize * map.m_len; ret = 0; } else if (ret == 0) { /* hole case, need to fill in bh->b_size */ bh->b_size = inode->i_sb->s_blocksize * map.m_len; } return ret; } int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { return _ext4_get_block(inode, iblock, bh, create ? EXT4_GET_BLOCKS_CREATE : 0); } /* * Get block function used when preparing for buffered write if we require * creating an unwritten extent if blocks haven't been allocated. The extent * will be converted to written after the IO is complete. */ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n", inode->i_ino, create); return _ext4_get_block(inode, iblock, bh_result, EXT4_GET_BLOCKS_IO_CREATE_EXT); } /* Maximum number of blocks we map for direct IO at once. */ #define DIO_MAX_BLOCKS 4096 /* * `handle' can be NULL if create is zero */ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct ext4_map_blocks map; struct buffer_head *bh; int create = map_flags & EXT4_GET_BLOCKS_CREATE; int err; J_ASSERT(handle != NULL || create == 0); map.m_lblk = block; map.m_len = 1; err = ext4_map_blocks(handle, inode, &map, map_flags); if (err == 0) return create ? 
ERR_PTR(-ENOSPC) : NULL; if (err < 0) return ERR_PTR(err); bh = sb_getblk(inode->i_sb, map.m_pblk); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); if (map.m_flags & EXT4_MAP_NEW) { J_ASSERT(create != 0); J_ASSERT(handle != NULL); /* * Now that we do not always journal data, we should * keep in mind whether this should always journal the * new buffer as metadata. For now, regular file * writes use ext4_get_block instead, so it's not a * problem. */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, bh); if (unlikely(err)) { unlock_buffer(bh); goto errout; } if (!buffer_uptodate(bh)) { memset(bh->b_data, 0, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (unlikely(err)) goto errout; } else BUFFER_TRACE(bh, "not a new buffer"); return bh; errout: brelse(bh); return ERR_PTR(err); } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct buffer_head *bh; bh = ext4_getblk(handle, inode, block, map_flags); if (IS_ERR(bh)) return bh; if (!bh || ext4_buffer_uptodate(bh)) return bh; ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); return ERR_PTR(-EIO); } /* Read a contiguous batch of blocks. */ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count, bool wait, struct buffer_head **bhs) { int i, err; for (i = 0; i < bh_count; i++) { bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */); if (IS_ERR(bhs[i])) { err = PTR_ERR(bhs[i]); bh_count = i; goto out_brelse; } } for (i = 0; i < bh_count; i++) /* Note that NULL bhs[i] is valid because of holes. 
*/ if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bhs[i]); if (!wait) return 0; for (i = 0; i < bh_count; i++) if (bhs[i]) wait_on_buffer(bhs[i]); for (i = 0; i < bh_count; i++) { if (bhs[i] && !buffer_uptodate(bhs[i])) { err = -EIO; goto out_brelse; } } return 0; out_brelse: for (i = 0; i < bh_count; i++) { brelse(bhs[i]); bhs[i] = NULL; } return err; } int ext4_walk_page_buffers(handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)(handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for (bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } /* * To preserve ordering, it is essential that the hole instantiation and * the data write be encapsulated in a single transaction. We cannot * close off a transaction and start a new one between the ext4_get_block() * and the commit_write(). So doing the jbd2_journal_start at the start of * prepare_write() is the right place. * * Also, this function can nest inside ext4_writepage(). In that case, we * *know* that ext4_writepage() has generated enough buffer credits to do the * whole page. So we won't block on the journal in that case, which is good, * because the caller may be PF_MEMALLOC. * * By accident, ext4 can be reentered when a transaction is open via * quota file writes. 
If we were to commit the transaction while thus * reentered, there can be a deadlock - we would be holding a quota * lock, and the commit would never complete if another thread had a * transaction open and was blocking on the quota lock - a ranking * violation. * * So what we do is to rely on the fact that jbd2_journal_stop/journal_start * will _not_ run commit under these circumstances because handle->h_ref * is elevated. We'll still have enough credits for the tiny quotafile * write. */ int do_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { int dirty = buffer_dirty(bh); int ret; if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; /* * __block_write_begin() could have dirtied some buffers. Clean * the dirty bit as jbd2_journal_get_write_access() could complain * otherwise about fs integrity issues. Setting of the dirty bit * by __block_write_begin() isn't a real problem here as we clear * the bit before releasing a page lock and thus writeback cannot * ever write the buffer. 
*/ if (dirty) clear_buffer_dirty(bh); BUFFER_TRACE(bh, "get write access"); ret = ext4_journal_get_write_access(handle, bh); if (!ret && dirty) ret = ext4_handle_dirty_metadata(handle, NULL, bh); return ret; } #ifdef CONFIG_FS_ENCRYPTION static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; sector_t block; int err = 0; unsigned blocksize = inode->i_sb->s_blocksize; unsigned bbits; struct buffer_head *bh, *head, *wait[2]; int nr_wait = 0; int i; BUG_ON(!PageLocked(page)); BUG_ON(from > PAGE_SIZE); BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); bbits = ilog2(blocksize); block = (sector_t)page->index << (PAGE_SHIFT - bbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } continue; } if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; if (buffer_new(bh)) { if (PageUptodate(page)) { clear_buffer_new(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); continue; } if (block_end > to || block_start < from) zero_user_segments(page, to, block_end, block_start, from); continue; } } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); wait[nr_wait++] = bh; } } /* * If we issued read requests, let them complete. 
 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		page_zero_new_buffers(page, from, to);
	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
		/*
		 * Decrypt the freshly read blocks in place.  On failure,
		 * clear the uptodate bit so the block will be re-read
		 * rather than exposing ciphertext to the caller.
		 */
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
								bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * ->write_begin() for the ordered/writeback data modes: reserve journal
 * credits, grab and lock the page, and prepare the covered buffers for the
 * write.  On failure, any blocks instantiated beyond i_size are trimmed off
 * again via the orphan list / truncate dance below.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	/* Small files may be served entirely from the inode body. */
	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);
	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock.
First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed || inline_data) ext4_mark_inode_dirty(handle, inode); if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); errout: ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size && !verity) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } /* * This is a private version of page_zero_new_buffers() which doesn't * set the buffer to be dirty, since in data=journalled mode we need * to call ext4_handle_dirty_metadata() instead. 
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * ->write_end() for data=journal mode: buffers are journalled via
 * write_end_fn() rather than simply dirtied.
 */
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		/*
		 * Short copy into a non-uptodate page: treat it as if
		 * nothing was copied and zero any freshly allocated buffers.
		 */
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed || inline_data) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less. We will have blocks allocated outside
		 * inode->i_size. So truncate them
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}

/*
 * Release @to_free clusters reserved by delayed allocation: adjust the
 * per-inode reservation counter, the fs-wide dirty-clusters counter and
 * the quota reservation.
 */
void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
};

/*
 * Unlock the pages in [first_page, next_page) collected by the mpage
 * machinery and, if @invalidate is set, throw away their contents and the
 * corresponding extent status entries (used when giving up on writeback).
 */
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;

		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

/* Dump free/reserved block accounting when delalloc writeback hits ENOSPC. */
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

/* Is this a dirty buffer that still needs block allocation/conversion? */
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one.  Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)   /* ENOSPC */
			goto errout;
	} else {   /* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					goto errout;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)   /* ENOSPC */
						goto errout;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);

errout:
	return ret;
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time. This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	/* Make sure the sentinel block number lies beyond the filesystem. */
	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * Delayed extent could be allocated by fallocate.
		 * So we need to check it.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * first, we need to know whether the block is allocated already
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

/* Pin a buffer_head while the page is unlocked (see caller below). */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

/* Drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

/*
 * Write out one page in data=journal mode: journal either the inline data
 * or all of the page's buffers under a freshly started transaction handle.
 */
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		ret = ext4_mark_inode_dirty(handle, inode);
	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start transaction directly because transaction start ranks above page
 * lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function.
 * If we have page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmaped based write. So if we do with blocksize 1K
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * we have in the page first buffer_head mapped via page_mkwrite call back
 * but other buffer_heads would be unmapped but dirty (dirty done via the
 * do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that is either delay or
 * unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * Page also have the dirty flag cleared so we don't get recursive page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;
	bool keep_towrite = false;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		ext4_invalidatepage(page, 0, PAGE_SIZE);
		unlock_page(page);
		return -EIO;
	}

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	/* Only write the in-file part of a partial last page. */
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function.  If there are buffers needing that, we have to redirty
	 * the page.  But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 *
	 * Also, if there is only one buffer per page (the fs block
	 * size == the page size), if one buffer needs block
	 * allocation or needs to modify the extent tree to clear the
	 * unwritten flag, we know that the page can't be written at
	 * all, so we might as well refuse the write immediately.
	 * Unfortunately if the block size != page size, we can't as
	 * easily detect this case using ext4_walk_page_buffers(), but
	 * for the extremely common case, this is an optimization that
	 * skips a useless round trip through ext4_bio_write_page().
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if ((current->flags & PF_MEMALLOC) ||
		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers. So just bail out. Warn if we came here
			 * from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
		keep_towrite = true;
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	ext4_io_submit_init(&io_submit, wbc);
	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_submit.io_end) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return -ENOMEM;
	}
	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
	ext4_io_submit(&io_submit);
	/* Drop io_end reference we got from init */
	ext4_put_io_end_defer(io_submit.io_end);
	return ret;
}

/* Write one locked, fully-mapped page and advance mpd->first_page past it. */
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
	int len;
	loff_t size;
	int err;

	BUG_ON(page->index != mpd->first_page);
	clear_page_dirty_for_io(page);
	/*
	 * We have to be very careful here!  Nothing protects writeback path
	 * against i_size changes and the page can be writeably mapped into
	 * page tables.
	 * So an application can be growing i_size and writing
	 * data through mmap while writeback runs.  clear_page_dirty_for_io()
	 * write-protects our page in page tables and the page cannot get
	 * written to again until we release page lock. So only after
	 * clear_page_dirty_for_io() we are safe to sample i_size for
	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
	 * on the barrier provided by TestClearPageDirty in
	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
	 * after page tables are updated.
	 */
	size = i_size_read(mpd->inode);
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(mpd->inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
	if (!err)
		mpd->wbc->nr_to_write--;
	mpd->first_page++;

	return err;
}

#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contig. blocks in the same state. If the
 * buffer doesn't require mapping for writeback and we haven't started the
 * extent of buffers to map yet, the function returns 'true' immediately - the
 * caller can write the buffer right away. Otherwise the function returns true
 * if the block has been added to the extent, false if the block couldn't be
 * added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		/* We cannot map unless handle is started... */
		if (!mpd->do_map)
			return false;
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh upto @head (exclusive) and either submit
 * the page for IO if all buffers in this page were mapped and there's no
 * accumulated extent of buffers to map or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return value
 * < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
							>> inode->i_blkbits;

	/* Verity may write Merkle tree blocks past EOF; don't clamp then. */
	if (ext4_verity_in_progress(inode))
		blocks = EXT_MAX_BLOCKS;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map?
			 */
			if (mpd->map.m_len)
				return 0;
			/* Buffer needs mapping and handle is not started? */
			if (!mpd->do_map)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_page(mpd, head->b_page);
		if (err < 0)
			return err;
	}
	return lblk < blocks;
}

/*
 * mpage_process_page - update page buffers corresponding to changed extent
 *			and may submit fully mapped page for IO
 *
 * @mpd		- description of extent to map, on return next extent to map
 * @m_lblk	- logical block mapping.
 * @m_pblk	- corresponding physical mapping.
 * @map_bh	- determines on return whether this page requires any further
 *		  mapping or not.
 * Scan given page buffers corresponding to changed extent and update buffer
 * state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits.
 * If the given page is not fully mapped, we update @map to the next extent in
 * the given page that needs mapping & return @map_bh as true.
 */
static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
			      bool *map_bh)
{
	struct buffer_head *head, *bh;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	ext4_lblk_t lblk = *m_lblk;
	ext4_fsblk_t pblock = *m_pblk;
	int err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ssize_t io_end_size = 0;
	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);

	bh = head = page_buffers(page);
	do {
		if (lblk < mpd->map.m_lblk)
			continue;
		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
			/*
			 * Buffer after end of mapped extent.
			 * Find next buffer in the page to map.
			 */
			mpd->map.m_len = 0;
			mpd->map.m_flags = 0;
			io_end_vec->size += io_end_size;
			io_end_size = 0;

			err = mpage_process_page_bufs(mpd, head, bh, lblk);
			if (err > 0)
				err = 0;
			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
				io_end_vec = ext4_alloc_io_end_vec(io_end);
				io_end_vec->offset = mpd->map.m_lblk << blkbits;
			}
			*map_bh = true;
			goto out;
		}
		if (buffer_delay(bh)) {
			clear_buffer_delay(bh);
			bh->b_blocknr = pblock++;
		}
		clear_buffer_unwritten(bh);
		io_end_size += (1 << blkbits);
	} while (lblk++, (bh = bh->b_this_page) != head);

	io_end_vec->size += io_end_size;
	io_end_size = 0;
	*map_bh = false;
out:
	*m_lblk = lblk;
	*m_pblk = pblock;
	return err;
}

/*
 * mpage_map_buffers - update buffers corresponding to changed extent and
 *		       submit fully mapped pages for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 *
 * Scan buffers corresponding to changed extent (we expect corresponding pages
 * to be already locked) and update buffer state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
 * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not fully
 * mapped, we update @map to the next extent in the last page that needs
 * mapping. Otherwise we submit the page for IO.
 */
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
	struct pagevec pvec;
	int nr_pages, i;
	struct inode *inode = mpd->inode;
	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
	pgoff_t start, end;
	ext4_lblk_t lblk;
	ext4_fsblk_t pblock;
	int err;
	bool map_bh = false;

	start = mpd->map.m_lblk >> bpp_bits;
	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
	lblk = start << bpp_bits;
	pblock = mpd->map.m_pblk;

	pagevec_init(&pvec);
	while (start <= end) {
		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
						&start, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			err = mpage_process_page(mpd, page, &lblk, &pblock,
						 &map_bh);
			/*
			 * If map_bh is true, means page may require further bh
			 * mapping, or maybe the page was submitted for IO.
			 * So we return to call further extent mapping.
			 */
			if (err < 0 || map_bh == true)
				goto out;
			/* Page fully mapped - let IO run! */
			err = mpage_submit_page(mpd, page);
			if (err < 0)
				goto out;
		}
		pagevec_release(&pvec);
	}
	/* Extent fully mapped and matches with page boundary. We are done. */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

/* Allocate/convert on-disk blocks for the extent described by mpd->map. */
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int get_blocks_flags;
	int err, dioread_nolock;

	trace_ext4_da_write_pages_extent(inode, map);
	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
	 * to convert an unwritten extent to be initialized (in the case
	 * where we have written into one or more preallocated blocks). It is
	 * possible that we're going to need more metadata blocks than
	 * previously reserved. However we must not fail because we're in
	 * writeback and there is nothing we can do about it so it might result
	 * in data loss.  So use reserved blocks to allocate metadata if
	 * possible.
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
	 * the blocks in question are delalloc blocks.  This indicates
	 * that the blocks and quotas has already been checked when
	 * the data was copied into the page cache.
	 */
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
			   EXT4_GET_BLOCKS_IO_SUBMIT;
	dioread_nolock = ext4_should_dioread_nolock(inode);
	if (dioread_nolock)
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (map->m_flags & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
	if (err < 0)
		return err;
	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
		/*
		 * Hand the reserved handle over to the io_end so that the
		 * unwritten extent can be converted after IO completes.
		 */
		if (!mpd->io_submit.io_end->handle &&
		    ext4_handle_valid(handle)) {
			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
			handle->h_rsv_handle = NULL;
		}
		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
	}

	BUG_ON(map->m_len == 0);
	return 0;
}

/*
 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
 *				 mpd->len and submit pages underlying it for IO
 *
 * @handle - handle for journal operations
 * @mpd - extent to map
 * @give_up_on_write - we set this to true iff there is a fatal error and there
 *		       is no hope of writing the data. The caller should discard
 *		       dirty pages to avoid infinite loops.
 *
 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
 * delayed, blocks are allocated, if it is unwritten, we may need to convert
 * them to initialized or split the described range from larger unwritten
 * extent. Note that we need not map all the described range since allocation
 * can return less blocks or the range is covered by more unwritten extents. We
 * cannot map more because we are limited by reserved transaction credits. On
 * the other hand we always make sure that the last touched page is fully
 * mapped so that it can be written out (and thus forward progress is
 * guaranteed). After mapping we submit all mapped pages for IO.
*/
static int mpage_map_and_submit_extent(handle_t *handle,
				       struct mpage_da_data *mpd,
				       bool *give_up_on_write)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int err;
	loff_t disksize;
	int progress = 0;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	struct ext4_io_end_vec *io_end_vec = ext4_alloc_io_end_vec(io_end);

	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
	do {
		err = mpage_map_one_extent(handle, mpd);
		if (err < 0) {
			struct super_block *sb = inode->i_sb;

			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
			    EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
				goto invalidate_dirty_pages;
			/*
			 * Let the upper layers retry transient errors.
			 * In the case of ENOSPC, if ext4_count_free_blocks()
			 * is non-zero, a commit should free up blocks.
			 */
			if ((err == -ENOMEM) ||
			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
				/*
				 * If some blocks were mapped already, make
				 * sure i_disksize still gets updated before
				 * returning the transient error.
				 */
				if (progress)
					goto update_disksize;
				return err;
			}
			ext4_msg(sb, KERN_CRIT,
				 "Delayed block allocation failed for "
				 "inode %lu at logical offset %llu with"
				 " max blocks %u with error %d",
				 inode->i_ino,
				 (unsigned long long)map->m_lblk,
				 (unsigned)map->m_len, -err);
			ext4_msg(sb, KERN_CRIT,
				 "This should not happen!! Data will "
				 "be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(inode);
		invalidate_dirty_pages:
			*give_up_on_write = true;
			return err;
		}
		progress = 1;
		/*
		 * Update buffer state, submit mapped pages, and get us new
		 * extent to map
		 */
		err = mpage_map_and_submit_buffers(mpd);
		if (err < 0)
			goto update_disksize;
	} while (map->m_len);

update_disksize:
	/*
	 * Update on-disk size after IO is submitted. Races with
	 * truncate are avoided by checking i_size under i_data_sem.
	 */
	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
	if (disksize > EXT4_I(inode)->i_disksize) {
		int err2;
		loff_t i_size;

		down_write(&EXT4_I(inode)->i_data_sem);
		i_size = i_size_read(inode);
		/* Never push i_disksize beyond the current i_size */
		if (disksize > i_size)
			disksize = i_size;
		if (disksize > EXT4_I(inode)->i_disksize)
			EXT4_I(inode)->i_disksize = disksize;
		up_write(&EXT4_I(inode)->i_data_sem);
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (err2)
			ext4_error(inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   inode->i_ino);
		if (!err)
			err = err2;
	}
	return err;
}

/*
 * Calculate the total number of credits to reserve for one writepages
 * iteration. This is called from ext4_writepages(). We map an extent of
 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
 * bpp - 1 blocks in bpp different extents.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);

	return ext4_meta_trans_blocks(inode,
				      MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
}

/*
 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
 *				 and underlying extent to map
 *
 * @mpd - where to look for pages
 *
 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
 * IO immediately. When we find a page which isn't mapped we start accumulating
 * extent of buffers underlying these pages that needs mapping (formed by
 * either delayed or unwritten buffers). We also lock the pages containing
 * these buffers. The extent found is returned in @mpd structure (starting at
 * mpd->lblk with length mpd->len blocks).
 *
 * Note that this function can attach bios to one io_end structure which are
 * neither logically nor physically contiguous. Although it may seem as an
 * unnecessary complication, it is actually inevitable in blocksize < pagesize
 * case as we need to track IO to all buffers underlying a page in one io_end.
*/
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
	struct address_space *mapping = mpd->inode->i_mapping;
	struct pagevec pvec;
	unsigned int nr_pages;
	long left = mpd->wbc->nr_to_write;
	pgoff_t index = mpd->first_page;
	pgoff_t end = mpd->last_page;
	xa_mark_t tag;
	int i, err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ext4_lblk_t lblk;
	struct buffer_head *head;

	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	pagevec_init(&pvec);
	mpd->map.m_len = 0;
	mpd->next_page = index;
	while (index <= end) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			goto out;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * Accumulated enough dirty pages? This doesn't apply
			 * to WB_SYNC_ALL mode. For integrity sync we have to
			 * keep going because someone may be concurrently
			 * dirtying pages, and we might have synced a lot of
			 * newly appeared dirty pages, but have not synced all
			 * of the old dirty pages.
			 */
			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
				goto out;

			/* If we can't merge this page, we are done. */
			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
				goto out;

			lock_page(page);
			/*
			 * If the page is no longer dirty, or its mapping no
			 * longer corresponds to inode we are writing (which
			 * means it has been truncated or invalidated), or the
			 * page is already under writeback and we are not doing
			 * a data integrity writeback, skip the page
			 */
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
			    unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			if (mpd->map.m_len == 0)
				mpd->first_page = page->index;
			mpd->next_page = page->index + 1;
			/* Add all dirty buffers to mpd */
			lblk = ((ext4_lblk_t)page->index) <<
				(PAGE_SHIFT - blkbits);
			head = page_buffers(page);
			err = mpage_process_page_bufs(mpd, head, head, lblk);
			if (err <= 0)
				goto out;
			err = 0;
			left--;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

/*
 * ext4_writepages - ->writepages callback (see the aops tables below).
 * Repeatedly finds dirty pages, maps their blocks inside a journal
 * transaction and submits the IO. Journalled-data inodes are handed off
 * to generic_writepages() instead.
 */
static int ext4_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	pgoff_t writeback_index = 0;
	long nr_to_write = wbc->nr_to_write;
	int range_whole = 0;
	int cycled = 1;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int needed_blocks, rsv_blocks = 0, ret = 0;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
	bool done;
	struct blk_plug plug;
	bool give_up_on_write = false;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_journal_flag_rwsem);
	trace_ext4_writepages(inode, wbc);

	/*
	 * No pages to write?
This is mainly a kludge to avoid starting
	 * a transaction for special inodes like journal inode on last iput()
	 * because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_writepages;

	if (ext4_should_journal_data(inode)) {
		ret = generic_writepages(mapping, wbc);
		goto out_writepages;
	}

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem. We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
		     sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
		ret = -EROFS;
		goto out_writepages;
	}

	/*
	 * If we have inline data and arrive here, it means that
	 * we will soon create the block for the 1st page, so
	 * we'd better clear the inline data here.
	 */
	if (ext4_has_inline_data(inode)) {
		/* Just inode will be modified... */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_writepages;
		}
		BUG_ON(ext4_test_inode_state(inode,
				EXT4_STATE_MAY_INLINE_DATA));
		ext4_destroy_inline_data(handle, inode);
		ext4_journal_stop(handle);
	}

	if (ext4_should_dioread_nolock(inode)) {
		/*
		 * We may need to convert up to one extent per block in
		 * the page and we may dirty the inode.
		 */
		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
						PAGE_SIZE >> inode->i_blkbits);
	}

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index;
		if (writeback_index)
			cycled = 0;
		mpd.first_page = writeback_index;
		mpd.last_page = -1;
	} else {
		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
	}

	mpd.inode = inode;
	mpd.wbc = wbc;
	ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
	done = false;
	blk_start_plug(&plug);

	/*
	 * First writeback pages that don't need mapping - we can avoid
	 * starting a transaction unnecessarily and also avoid being blocked
	 * in the block layer on device congestion while having transaction
	 * started.
	 */
	mpd.do_map = 0;
	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
	if (!mpd.io_submit.io_end) {
		ret = -ENOMEM;
		goto unplug;
	}
	ret = mpage_prepare_extent_to_map(&mpd);
	/* Unlock pages we didn't use */
	mpage_release_unused_pages(&mpd, false);
	/* Submit prepared bio */
	ext4_io_submit(&mpd.io_submit);
	ext4_put_io_end_defer(mpd.io_submit.io_end);
	mpd.io_submit.io_end = NULL;
	if (ret < 0)
		goto unplug;

	while (!done && mpd.first_page <= mpd.last_page) {
		/* For each extent of pages we use new io_end */
		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
		if (!mpd.io_submit.io_end) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * We have two constraints: We find one extent to map and we
		 * must always write out whole page (makes a difference when
		 * blocksize < pagesize) so that we don't block on IO when we
		 * try to write out the rest of the page. Journalled mode is
		 * not supported by delalloc.
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start_with_reserve(inode,
				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			/* Release allocated io_end */
			ext4_put_io_end(mpd.io_submit.io_end);
			mpd.io_submit.io_end = NULL;
			break;
		}
		mpd.do_map = 1;

		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
		ret = mpage_prepare_extent_to_map(&mpd);
		if (!ret) {
			if (mpd.map.m_len)
				ret = mpage_map_and_submit_extent(handle, &mpd,
					&give_up_on_write);
			else {
				/*
				 * We scanned the whole range (or exhausted
				 * nr_to_write), submitted what was mapped and
				 * didn't find anything needing mapping. We are
				 * done.
				 */
				done = true;
			}
		}
		/*
		 * Caution: If the handle is synchronous,
		 * ext4_journal_stop() can wait for transaction commit
		 * to finish which may depend on writeback of pages to
		 * complete or on page lock to be released. In that
		 * case, we have to wait until after we have
		 * submitted all the IO, released page locks we hold,
		 * and dropped io_end reference (for extent conversion
		 * to be able to complete) before stopping the handle.
		 */
		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
			ext4_journal_stop(handle);
			handle = NULL;
			mpd.do_map = 0;
		}
		/* Unlock pages we didn't use */
		mpage_release_unused_pages(&mpd, give_up_on_write);
		/* Submit prepared bio */
		ext4_io_submit(&mpd.io_submit);

		/*
		 * Drop our io_end reference we got from init. We have
		 * to be careful and use deferred io_end finishing if
		 * we are still holding the transaction as we can
		 * release the last reference to io_end which may end
		 * up doing unwritten extent conversion.
		 */
		if (handle) {
			ext4_put_io_end_defer(mpd.io_submit.io_end);
			ext4_journal_stop(handle);
		} else
			ext4_put_io_end(mpd.io_submit.io_end);
		mpd.io_submit.io_end = NULL;

		if (ret == -ENOSPC && sbi->s_journal) {
			/*
			 * Commit the transaction which would
			 * free blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
			continue;
		}
		/* Fatal error - ENOMEM, EIO... */
		if (ret)
			break;
	}
unplug:
	blk_finish_plug(&plug);
	/*
	 * For cyclic writeback that started mid-mapping, go back and write
	 * out the range before the starting index.
	 */
	if (!ret && !cycled && wbc->nr_to_write > 0) {
		cycled = 1;
		mpd.last_page = writeback_index - 1;
		mpd.first_page = 0;
		goto retry;
	}

	/* Update index */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * Set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = mpd.first_page;

out_writepages:
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_journal_flag_rwsem);
	return ret;
}

/* ->writepages callback for DAX inodes (see ext4_dax_aops below). */
static int ext4_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	int ret;
	long nr_to_write = wbc->nr_to_write;
	struct inode *inode = mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_journal_flag_rwsem);
	trace_ext4_writepages(inode, wbc);

	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_journal_flag_rwsem);
	return ret;
}

static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_clusters, dirty_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * switch to non delalloc mode if we are running low
	 * on free block. The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating global counters
	 * Delalloc need an accurate free block accounting.
So switch
	 * to non delalloc when we are near to error range.
	 */
	free_clusters =
		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	dirty_clusters =
		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	/*
	 * Start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);

	if (2 * free_clusters < 3 * dirty_clusters ||
	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks is less than watermark
		 */
		return 1;
	}
	return 0;
}

/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
	if (likely(ext4_has_feature_large_file(inode->i_sb)))
		return 1;

	if (pos + len <= 0x7fffffffULL)
		return 1;

	/* We might need to update the superblock to set LARGE_FILE */
	return 2;
}

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	index = pos >> PAGE_SHIFT;

	/*
	 * Fall back to the non-delalloc path when free space is tight,
	 * for symlinks, or while fs-verity is being enabled on the inode.
	 */
	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
	    ext4_verity_in_progress(inode)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_da_write_inline_data_begin(mapping, inode,
						      pos, len, flags,
						      pagep, fsdata);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journalling the i_disksize update if writes to the end
	 * of file which has an already mapped buffer.
	 */
retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				ext4_da_write_credits(inode, pos, len));
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	ret = ext4_block_write_begin(page, pos, len,
				     ext4_da_get_block_prep);
#else
	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;

		put_page(page);
		return ret;
	}

	*pagep = page;
	return ret;
}

/*
 * Check if we should update i_disksize
 * when write to the end of file but not require block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	/* Walk to the buffer covering 'offset' within the page */
	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}

static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC)
		return ext4_write_end(file, mapping, pos,
				      len, copied, page, fsdata);

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes. So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_has_inline_data(inode) ||
		    ext4_da_should_update_i_disksize(page, end)) {
			ext4_update_i_disksize(inode, new_i_size);
			/* We need to mark inode dirty even if
			 * new_i_size is less that inode->i_size
			 * but greater than i_disksize. (hint delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}

	if (write_mode != CONVERT_INLINE_DATA &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
	    ext4_has_inline_data(inode))
		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
						     page);
	else
		ret2 = generic_write_end(file, mapping, pos, len, copied,
					 page, fsdata);

	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks)
		return 0;

	/*
	 * We do something simple for now. The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable). However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *        __mpage_da_writepage() -->
	 *           mpage_add_bh_to_extent()
	 *           mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.
So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal. If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	/*
	 * We can get here for an inline file via the FIBMAP ioctl
	 */
	if (ext4_has_inline_data(inode))
		return 0;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files. If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}

static int ext4_readpage(struct file *file, struct page *page)
{
	int ret = -EAGAIN;
	struct inode *inode = page->mapping->host;

	trace_ext4_readpage(page);

	if (ext4_has_inline_data(inode))
		ret = ext4_readpage_inline(inode, page);

	/* -EAGAIN from the inline path means: read via the normal path */
	if (ret == -EAGAIN)
		return ext4_mpage_readpages(page->mapping, NULL, page, 1,
					    false);

	return ret;
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
	       struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;

	/* If the file has inline data, no need to do readpages. */
	if (ext4_has_inline_data(inode))
		return 0;

	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	trace_ext4_invalidatepage(page, offset, length);

	/* No journalling happens on data buffers when this function is used */
	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));

	block_invalidatepage(page, offset, length);
}

static int __ext4_journalled_invalidatepage(struct page *page,
					    unsigned int offset,
					    unsigned int length)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_journalled_invalidatepage(page, offset, length);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0 && length == PAGE_SIZE)
		ClearPageChecked(page);

	return jbd2_journal_invalidatepage(journal, page, offset, length);
}

/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
					   unsigned int offset,
					   unsigned int length)
{
	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	/* Page has dirty journalled data -> cannot release */
	if (PageChecked(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}

static bool ext4_inode_datasync_dirty(struct inode *inode)
{
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;

	if (journal)
		return !jbd2_transaction_committed(journal,
					EXT4_I(inode)->i_datasync_tid);

	/* Any metadata buffers to write?
*/
	if (!list_empty(&inode->i_mapping->private_list))
		return true;
	return inode->i_state & I_DIRTY_DATASYNC;
}

/* Translate an ext4_map_blocks result into a struct iomap for the VFS. */
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
			   struct ext4_map_blocks *map, loff_t offset,
			   loff_t length)
{
	u8 blkbits = inode->i_blkbits;

	/*
	 * Writes that span EOF might trigger an I/O size update on completion,
	 * so consider them to be dirty for the purpose of O_DSYNC, even if
	 * there is no other metadata changes being made or are pending.
	 */
	iomap->flags = 0;
	if (ext4_inode_datasync_dirty(inode) ||
	    offset + length > i_size_read(inode))
		iomap->flags |= IOMAP_F_DIRTY;

	if (map->m_flags & EXT4_MAP_NEW)
		iomap->flags |= IOMAP_F_NEW;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
	iomap->offset = (u64) map->m_lblk << blkbits;
	iomap->length = (u64) map->m_len << blkbits;

	/*
	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
	 * set. In order for any allocated unwritten extents to be converted
	 * into written extents correctly within the ->end_io() handler, we
	 * need to ensure that the iomap->type is set appropriately. Hence, the
	 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has
	 * been set first.
	 */
	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
		iomap->type = IOMAP_UNWRITTEN;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else if (map->m_flags & EXT4_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	}
}

static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
			    unsigned int flags)
{
	handle_t *handle;
	u8 blkbits = inode->i_blkbits;
	int ret, dio_credits, m_flags = 0, retries = 0;

	/*
	 * Trim the mapping request to the maximum value that we can map at
	 * once for direct I/O.
	 */
	if (map->m_len > DIO_MAX_BLOCKS)
		map->m_len = DIO_MAX_BLOCKS;
	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);

retry:
	/*
	 * Either we allocate blocks and then don't get an unwritten extent, so
	 * in that case we have reserved enough credits. Or, the blocks are
	 * already allocated and unwritten. In that case, the extent conversion
	 * fits into the credits as well.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/*
	 * DAX and direct I/O are the only two operations that are currently
	 * supported with IOMAP_WRITE.
	 */
	WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
	if (IS_DAX(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
	/*
	 * We use i_size instead of i_disksize here because delalloc writeback
	 * can complete at any point during the I/O and subsequently push the
	 * i_disksize out to i_size. This could be beyond where direct I/O is
	 * happening and thus expose allocated blocks to direct I/O reads.
	 */
	else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;

	ret = ext4_map_blocks(handle, inode, map, m_flags);

	/*
	 * We cannot fill holes in indirect tree based inodes as that could
	 * expose stale data in the case of a crash. Use the magic error code
	 * to fallback to buffered I/O.
	 */
	if (!m_flags && !ret)
		ret = -ENOTBLK;

	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	return ret;
}

static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
		return -ERANGE;

	/*
	 * Calculate the first and last logical blocks respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	if (flags & IOMAP_WRITE)
		ret = ext4_iomap_alloc(inode, &map, flags);
	else
		ret = ext4_map_blocks(NULL, inode, &map, 0);

	if (ret < 0)
		return ret;

	ext4_set_iomap(inode, iomap, &map, offset, length);

	return 0;
}

static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	/*
	 * Check to see whether an error occurred while writing out the data to
	 * the allocated blocks. If so, return the magic error code so that we
	 * fallback to buffered I/O and attempt to complete the remainder of
	 * the I/O. Any blocks that may have been allocated in preparation for
	 * the direct I/O will be reused during buffered I/O.
	 */
	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
		return -ENOTBLK;

	return 0;
}

const struct iomap_ops ext4_iomap_ops = {
	.iomap_begin		= ext4_iomap_begin,
	.iomap_end		= ext4_iomap_end,
};

/*
 * Check the extent status tree for delayed-allocation extents covering
 * map->m_lblk; trims map->m_len to the uniform (delalloc or not) prefix.
 */
static bool ext4_iomap_is_delalloc(struct inode *inode,
				   struct ext4_map_blocks *map)
{
	struct extent_status es;
	ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
				  map->m_lblk, end, &es);

	if (!es.es_len || es.es_lblk > end)
		return false;

	if (es.es_lblk > map->m_lblk) {
		/* Range starts before the delalloc extent */
		map->m_len = es.es_lblk - map->m_lblk;
		return false;
	}

	offset = map->m_lblk - es.es_lblk;
	map->m_len = es.es_len - offset;

	return true;
}

static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
				   loff_t length, unsigned int flags,
				   struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	bool delalloc = false;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (ext4_has_inline_data(inode)) {
		ret = ext4_inline_data_iomap(inode, iomap);
		if (ret != -EAGAIN) {
			if (ret == 0 && offset >= iomap->length)
				ret = -ENOENT;
			return ret;
		}
	}

	/*
	 * Calculate the first and last logical block respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret == 0)
		delalloc = ext4_iomap_is_delalloc(inode, &map);

	ext4_set_iomap(inode, iomap, &map, offset, length);
	if (delalloc && iomap->type == IOMAP_HOLE)
		iomap->type = IOMAP_DELALLOC;

	return 0;
}

const struct iomap_ops ext4_iomap_report_ops = {
	.iomap_begin = ext4_iomap_begin_report,
};

/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
 * much here because ->set_page_dirty is called under VFS locks. The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
*/ static int ext4_journalled_set_page_dirty(struct page *page) { SetPageChecked(page); return __set_page_dirty_nobuffers(page); } static int ext4_set_page_dirty(struct page *page) { WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page)); WARN_ON_ONCE(!page_has_buffers(page)); return __set_page_dirty_buffers(page); } static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = ext4_journalled_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, .set_page_dirty = ext4_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = noop_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations 
/* Address-space ops for DAX inodes: no page cache, most ops are no-ops. */
ext4_dax_aops = {
	.writepages		= ext4_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= noop_invalidatepage,
};

/*
 * Select the address_space_operations for @inode based on its journalling
 * data mode and mount options.  data=journal wins unconditionally; otherwise
 * DAX > delalloc > plain ordered/writeback.
 */
void ext4_set_aops(struct inode *inode)
{
	switch (ext4_inode_journal_mode(inode)) {
	case EXT4_INODE_ORDERED_DATA_MODE:
	case EXT4_INODE_WRITEBACK_DATA_MODE:
		break;
	case EXT4_INODE_JOURNAL_DATA_MODE:
		inode->i_mapping->a_ops = &ext4_journalled_aops;
		return;
	default:
		BUG();
	}
	if (IS_DAX(inode))
		inode->i_mapping->a_ops = &ext4_dax_aops;
	else if (test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else
		inode->i_mapping->a_ops = &ext4_aops;
}

/*
 * Zero the byte range [from, from + length) through the page cache.  The
 * range must lie within a single fs block (the caller guarantees this).
 * Reads the block in (decrypting if needed) when it is not uptodate, then
 * zeroes the requested bytes and dirties the buffer via the journal or
 * directly, depending on the journalling mode.
 */
static int __ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	ext4_fsblk_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	/* ~__GFP_FS: we may be called with a transaction open; avoid fs reclaim */
	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;

	/* First logical block covered by this page */
	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}
	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
			/* We expect the key to be set. */
			BUG_ON(!fscrypt_has_encryption_key(inode));
			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
					page, blocksize, bh_offset(bh)));
		}
	}
	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}
	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		err = 0;
		mark_buffer_dirty(bh);
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_inode_add_write(handle, inode, from,
					length);
	}

unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'.  The range to be zero'd must
 * be contained within one block.  If the specified range exceeds
 * the end of the block it will be shortened to end of the block
 * that corresponds to 'from'.  DAX inodes go through iomap instead of
 * the page-cache path.
 */
static int ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	struct inode *inode = mapping->host;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned max = blocksize - (offset & (blocksize - 1));

	/*
	 * correct length if it does not fall between
	 * 'from' and the end of the block
	 */
	if (length > max || length < 0)
		length = max;

	if (IS_DAX(inode)) {
		return iomap_zero_range(inode, from, length, NULL,
					&ext4_iomap_ops);
	}
	return __ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This required during truncate. We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
*/ static int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from) { unsigned offset = from & (PAGE_SIZE-1); unsigned length; unsigned blocksize; struct inode *inode = mapping->host; /* If we are processing an encrypted inode during orphan list handling */ if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode)) return 0; blocksize = inode->i_sb->s_blocksize; length = blocksize - (offset & (blocksize - 1)); return ext4_block_zero_page_range(handle, mapping, from, length); } int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, loff_t lstart, loff_t length) { struct super_block *sb = inode->i_sb; struct address_space *mapping = inode->i_mapping; unsigned partial_start, partial_end; ext4_fsblk_t start, end; loff_t byte_end = (lstart + length - 1); int err = 0; partial_start = lstart & (sb->s_blocksize - 1); partial_end = byte_end & (sb->s_blocksize - 1); start = lstart >> sb->s_blocksize_bits; end = byte_end >> sb->s_blocksize_bits; /* Handle partial zero within the single block */ if (start == end && (partial_start || (partial_end != sb->s_blocksize - 1))) { err = ext4_block_zero_page_range(handle, mapping, lstart, length); return err; } /* Handle partial zero out on the start of the range */ if (partial_start) { err = ext4_block_zero_page_range(handle, mapping, lstart, sb->s_blocksize); if (err) return err; } /* Handle partial zero out on the end of the range */ if (partial_end != sb->s_blocksize - 1) err = ext4_block_zero_page_range(handle, mapping, byte_end - partial_end, partial_end + 1); return err; } int ext4_can_truncate(struct inode *inode) { if (S_ISREG(inode->i_mode)) return 1; if (S_ISDIR(inode->i_mode)) return 1; if (S_ISLNK(inode->i_mode)) return !ext4_inode_is_fast_symlink(inode); return 0; } /* * We have to make sure i_disksize gets properly updated before we truncate * page cache due to hole punching or zero range. 
 Otherwise i_disksize update
 * can get lost as it may have been postponed to submission of writeback but
 * that will never happen after we truncate page cache.
 */
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
				      loff_t len)
{
	handle_t *handle;
	loff_t size = i_size_read(inode);

	WARN_ON(!inode_is_locked(inode));
	/* Only needed when the punched range straddles i_size. */
	if (offset > size || offset + len < size)
		return 0;

	if (EXT4_I(inode)->i_disksize >= size)
		return 0;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ext4_update_i_disksize(inode, size);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	return 0;
}

/*
 * Helper for ext4_break_layouts(): drop i_mmap_sem while we sleep waiting
 * for a busy DAX page, then retake it before rechecking.
 */
static void ext4_wait_dax_page(struct ext4_inode_info *ei)
{
	up_write(&ei->i_mmap_sem);
	schedule();
	down_write(&ei->i_mmap_sem);
}

/*
 * Wait until no DAX page of this inode is in use (refcount back to 1)
 * before the caller changes the block mapping.  Must be called with
 * i_mmap_sem held for write; returns 0, -EINVAL on lock misuse, or the
 * error from an interrupted wait.
 */
int ext4_break_layouts(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct page *page;
	int error;

	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
		return -EINVAL;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;

		error = ___wait_var_event(&page->_refcount,
				atomic_read(&page->_refcount) == 1,
				TASK_INTERRUPTIBLE, 0, 0,
				ext4_wait_dax_page(ei));
	} while (error == 0);

	return error;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	/* Inline data must be converted before we can punch block ranges. */
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	if (ext4_has_inline_data(inode)) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_convert_inline_data(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			return ret;
	}

	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		    PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
		    offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;
	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_dio;

	/* Block-aligned interior of the hole; partial edge blocks are zeroed. */
	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero block aligned part of pages*/
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are blocks to remove, do it */
	if (stop_block > first_block) {

		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);

		ret = ext4_es_remove_extent(inode, first_block,
					    stop_block - first_block);
		if (ret) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}

		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
			ret = ext4_ext_remove_space(inode, first_block,
						    stop_block - 1);
		else
			ret = ext4_ind_remove_space(handle, inode, first_block,
						    stop_block);

		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/*
 * Lazily allocate and attach the jbd2_inode used for data=ordered
 * tracking.  Safe against concurrent callers via i_lock; returns 0 or
 * -ENOMEM.  No-op when already attached or when there is no journal.
 */
int ext4_inode_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if
 (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	/* Allocate outside the spinlock; free the spare copy if we raced. */
	jinode = jbd2_alloc_inode(GFP_KERNEL);
	spin_lock(&inode->i_lock);
	if (!ei->jinode) {
		if (!jinode) {
			spin_unlock(&inode->i_lock);
			return -ENOMEM;
		}
		ei->jinode = jinode;
		jbd2_journal_init_jbd_inode(ei->jinode, inode);
		jinode = NULL;
	}
	spin_unlock(&inode->i_lock);
	if (unlikely(jinode != NULL))
		jbd2_free_inode(jinode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
*/ int ext4_truncate(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); unsigned int credits; int err = 0; handle_t *handle; struct address_space *mapping = inode->i_mapping; /* * There is a possibility that we're either freeing the inode * or it's a completely new inode. In those cases we might not * have i_mutex locked because it's not necessary. */ if (!(inode->i_state & (I_NEW|I_FREEING))) WARN_ON(!inode_is_locked(inode)); trace_ext4_truncate_enter(inode); if (!ext4_can_truncate(inode)) return 0; ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); if (ext4_has_inline_data(inode)) { int has_inline = 1; err = ext4_inline_data_truncate(inode, &has_inline); if (err) return err; if (has_inline) return 0; } /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { if (ext4_inode_attach_jinode(inode) < 0) return 0; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) return PTR_ERR(handle); if (inode->i_size & (inode->i_sb->s_blocksize - 1)) ext4_block_truncate_page(handle, mapping, inode->i_size); /* * We add the inode to the orphan list, so that if this * truncate spans multiple transactions, and we crash, we will * resume the truncate when the filesystem recovers. It also * marks the inode dirty, to catch the new size. * * Implication: the file must always be in a sane, consistent * truncatable state while each transaction commits. 
*/ err = ext4_orphan_add(handle, inode); if (err) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) err = ext4_ext_truncate(handle, inode); else ext4_ind_truncate(handle, inode); up_write(&ei->i_data_sem); if (err) goto out_stop; if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: /* * If this was a simple ftruncate() and the file will remain alive, * then we need to clear up the orphan record which we created above. * However, if this was a real unlink then we were called by * ext4_evict_inode(), and we allow that function to clean up the * orphan info for us. */ if (inode->i_nlink) ext4_orphan_del(handle, inode); inode->i_mtime = inode->i_ctime = current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); trace_ext4_truncate_exit(inode); return err; } /* * ext4_get_inode_loc returns with an extra refcount against the inode's * underlying buffer_head on success. If 'in_mem' is true, we have all * data in memory that is needed to recreate the on-disk version of this * inode. 
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc	*gdp;
	struct buffer_head	*bh;
	struct super_block	*sb = inode->i_sb;
	ext4_fsblk_t		block;
	struct blk_plug		plug;
	int			inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (inode->i_ino < EXT4_ROOT_INO ||
	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return -EFSCORRUPTED;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			/* First inode slot in this itable block */
			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			/* Any other in-use inode in this block forces a read. */
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		blk_start_plug(&plug);
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;
			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~((ext4_fsblk_t) ra_blks - 1);
			if (table > b)
				b = table;
			end = b + ra_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
		blk_finish_plug(&plug);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here.
 */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}

/*
 * Decide whether this inode may use DAX.  DAX is incompatible with
 * data journalling, inline data, encryption and verity.
 */
static bool ext4_should_use_dax(struct inode *inode)
{
	if (!test_opt(inode->i_sb, DAX))
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
		return false;
	return true;
}

/* Propagate the on-disk EXT4_*_FL flags into the VFS inode S_* flags. */
void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & EXT4_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (ext4_should_use_dax(inode))
		new_fl |= S_DAX;
	if (flags & EXT4_ENCRYPT_FL)
		new_fl |= S_ENCRYPTED;
	if (flags & EXT4_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	if (flags & EXT4_VERITY_FL)
		new_fl |= S_VERITY;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
}

/*
 * Decode the on-disk block count.  With the huge_file feature the count is
 * a 48-bit value split across i_blocks_high/lo; with EXT4_INODE_HUGE_FILE
 * it is additionally in units of fs blocks rather than 512-byte sectors.
 */
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks ;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (ext4_has_feature_huge_file(sb)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks represent file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

/*
 * Look for an in-inode xattr magic just past i_extra_isize; if present,
 * record the XATTR state and locate any inline data.
 */
static inline int ext4_iget_extra_inode(struct inode *inode,
					 struct ext4_inode *raw_inode,
					 struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;

	if
 (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
	    EXT4_INODE_SIZE(inode->i_sb) &&
	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		return ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
	return 0;
}

/* Fetch the project id; -EOPNOTSUPP when the feature isn't enabled. */
int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
	if (!ext4_has_feature_project(inode->i_sb))
		return -EOPNOTSUPP;
	*projid = EXT4_I(inode)->i_projid;
	return 0;
}

/*
 * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of
 * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
 * set.
 */
static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		inode_set_iversion_raw(inode, val);
	else
		inode_set_iversion_queried(inode, val);
}

static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		return inode_peek_iversion_raw(inode);
	else
		return inode_peek_iversion(inode);
}

/*
 * Read inode @ino from disk and build the in-memory inode, validating the
 * on-disk fields (range of ino, extra_isize, checksum, i_size, i_file_acl,
 * block references, i_mode) as we go.  Returns the inode or an ERR_PTR;
 * corruption is reported via ext4_error_inode() and -EFSCORRUPTED.
 */
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
			  ext4_iget_flags flags, const char *function,
			  unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	loff_t size;
	int block;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	/* Reject inode numbers outside the valid range (unless SPECIAL). */
	if ((!(flags & EXT4_IGET_SPECIAL) &&
	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
	    (ino < EXT4_ROOT_INO) ||
	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
		if (flags & EXT4_IGET_HANDLE)
			return ERR_PTR(-ESTALE);
		__ext4_error(sb, function, line,
			     "inode #%lu: comm %s: iget: illegal inode #",
			     ino, current->comm);
		return ERR_PTR(-EFSCORRUPTED);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: root inode unallocated");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if ((flags & EXT4_IGET_HANDLE) &&
	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
			EXT4_INODE_SIZE(inode->i_sb) ||
		    (ei->i_extra_isize & 3)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: bad extra_isize %u "
					 "(inode size %u)",
					 ei->i_extra_isize,
					 EXT4_INODE_SIZE(inode->i_sb));
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: checksum invalid");
		ret = -EFSBADCRC;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (ext4_has_feature_project(sb) &&
	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
	else
		i_projid = EXT4_DEF_PROJID;

	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if ((inode->i_mode == 0 ||
		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
		    ino != EXT4_BOOT_LOADER_INO) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 * OR it is the EXT4_BOOT_LOADER_INO which is
		 * not initialized on a new filesystem. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext4_set_inode_flags(inode);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (ext4_has_feature_64bit(sb))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(sb, raw_inode);
	if ((size = i_size_read(inode)) < 0) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad i_size value: %lld", size);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
			if (ret)
				goto bad_inode;
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);

		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				ivers |=
		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
		}
		ext4_inode_set_iversion_queried(inode, ivers);
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		/* validate the block references in the inode */
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
				ret = ext4_ext_check_inode(inode);
			else
				ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	/* Wire up ops based on file type. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		/* VFS does not allow setting these so must be corruption */
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: immutable or append flags "
					 "not allowed on symlinks");
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
		if (IS_ENCRYPTED(inode)) {
			inode->i_op = &ext4_encrypted_symlink_inode_operations;
			ext4_set_aops(inode);
		} else if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
		inode_nohighmem(inode);
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		ext4_error_inode(inode, function, line, 0,
				 "iget: bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
		ext4_error_inode(inode, function, line, 0,
				 "casefold flag without casefold feature");
	brelse(iloc.bh);

	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); u64 i_blocks = inode->i_blocks; struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { /* * i_blocks can be represented in a 32 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = 0; ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); return 0; } if (!ext4_has_feature_huge_file(sb)) return -EFBIG; if (i_blocks <= 0xffffffffffffULL) { /* * i_blocks can be represented in a 48 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); } else { ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); /* i_block is stored in file system block size */ i_blocks = i_blocks >> (inode->i_blkbits - 9); raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); } return 0; } struct other_inode { unsigned long orig_ino; struct ext4_inode *raw_inode; }; static int other_inode_match(struct inode * inode, unsigned long ino, void *data) { struct other_inode *oi = (struct other_inode *) data; if ((inode->i_ino != ino) || (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) || ((inode->i_state & I_DIRTY_TIME) == 0)) return 0; spin_lock(&inode->i_lock); if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) == 0) && (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); ext4_inode_csum_set(inode, oi->raw_inode, ei); spin_unlock(&ei->i_raw_lock); trace_ext4_other_inode_update_time(inode, oi->orig_ino); return -1; } spin_unlock(&inode->i_lock); 
return -1; } /* * Opportunistically update the other time fields for other inodes in * the same inode table block. */ static void ext4_update_other_inodes_time(struct super_block *sb, unsigned long orig_ino, char *buf) { struct other_inode oi; unsigned long ino; int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; int inode_size = EXT4_INODE_SIZE(sb); oi.orig_ino = orig_ino; /* * Calculate the first inode in the inode table block. Inode * numbers are one-based. That is, the first inode in a block * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). */ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; oi.raw_inode = (struct ext4_inode *) buf; (void) find_inode_nowait(sb, ino, other_inode_match, &oi); } } /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. This gobbles the caller's reference to the * buffer_head in the inode location struct. * * The caller must have write access to iloc->bh. */ static int ext4_do_update_inode(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; struct super_block *sb = inode->i_sb; int err = 0, rc, block; int need_datasync = 0, set_large_file = 0; uid_t i_uid; gid_t i_gid; projid_t i_projid; spin_lock(&ei->i_raw_lock); /* For fields not tracked in the in-memory inode, * initialise them to zero for new inodes. 
*/ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); i_gid = i_gid_read(inode); i_projid = from_kprojid(&init_user_ns, ei->i_projid); if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (ei->i_dtime && list_empty(&ei->i_orphan)) { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } else { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(i_gid)); } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); err = ext4_inode_blocks_set(handle, raw_inode, ei); if (err) { spin_unlock(&ei->i_raw_lock); goto out_brelse; } raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } if (ei->i_disksize > 0x7fffffffULL) { if (!ext4_has_feature_large_file(sb) || EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) set_large_file = 1; } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || 
S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else if (!ext4_has_inline_data(inode)) { for (block = 0; block < EXT4_N_BLOCKS; block++) raw_inode->i_block[block] = ei->i_data[block]; } if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { u64 ivers = ext4_inode_peek_iversion(inode); raw_inode->i_disk_version = cpu_to_le32(ivers); if (ei->i_extra_isize) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) raw_inode->i_version_hi = cpu_to_le32(ivers >> 32); raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } } BUG_ON(!ext4_has_feature_project(inode->i_sb) && i_projid != EXT4_DEF_PROJID); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) raw_inode->i_projid = cpu_to_le32(i_projid); ext4_inode_csum_set(inode, raw_inode, ei); spin_unlock(&ei->i_raw_lock); if (inode->i_sb->s_flags & SB_LAZYTIME) ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, bh->b_data); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, NULL, bh); if (!err) err = rc; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_brelse; ext4_set_feature_large_file(sb); ext4_handle_sync(handle); err = ext4_handle_dirty_super(handle, sb); } ext4_update_inode_fsync_trans(handle, inode, need_datasync); out_brelse: brelse(bh); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_write_inode() * * We are called from a few places: * * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. * Here, there will be no transaction running. We wait for any running * transaction to commit. 
* * - Within flush work (sys_sync(), kupdate and such). * We wait on commit, if told to. * * - Within iput_final() -> write_inode_now() * We wait on commit, if told to. * * In all cases it is actually safe for us to return without doing anything, * because the inode has been copied into a raw inode buffer in * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL * writeback. * * Note that we are absolutely dependent upon all inode dirtiers doing the * right thing: they *must* call mark_inode_dirty() after dirtying info in * which we are interested. * * It would be a bug for them to not do this. The code: * * mark_inode_dirty(inode) * stuff(); * inode->i_size = expr; * * is in error because write_inode() could occur while `stuff()' is running, * and the new i_size will be lost. Plus the inode will no longer be on the * superblock's dirty inode list. */ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) { int err; if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) || sb_rdonly(inode->i_sb)) return 0; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; if (EXT4_SB(inode->i_sb)->s_journal) { if (ext4_journal_current_handle()) { jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); dump_stack(); return -EIO; } /* * No need to force transaction in WB_SYNC_NONE mode. Also * ext4_sync_fs() will force the commit after everything is * written. */ if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) return 0; err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, EXT4_I(inode)->i_sync_tid); } else { struct ext4_iloc iloc; err = __ext4_get_inode_loc(inode, &iloc, 0); if (err) return err; /* * sync(2) will flush the whole buffer cache. No need to do * it here separately for each inode. 
*/ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) sync_dirty_buffer(iloc.bh); if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, "IO error syncing inode"); err = -EIO; } brelse(iloc.bh); } return err; } /* * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate * buffers that are attached to a page stradding i_size and are undergoing * commit. In that case we have to wait for commit to finish and try again. */ static void ext4_wait_for_tail_page_commit(struct inode *inode) { struct page *page; unsigned offset; journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = 0; int ret; offset = inode->i_size & (PAGE_SIZE - 1); /* * All buffers in the last page remain valid? Then there's nothing to * do. We do the check mainly to optimize the common PAGE_SIZE == * blocksize case */ if (offset > PAGE_SIZE - i_blocksize(inode)) return; while (1) { page = find_lock_page(inode->i_mapping, inode->i_size >> PAGE_SHIFT); if (!page) return; ret = __ext4_journalled_invalidatepage(page, offset, PAGE_SIZE - offset); unlock_page(page); put_page(page); if (ret != -EBUSY) return; commit_tid = 0; read_lock(&journal->j_state_lock); if (journal->j_committing_transaction) commit_tid = journal->j_committing_transaction->t_tid; read_unlock(&journal->j_state_lock); if (commit_tid) jbd2_log_wait_commit(journal, commit_tid); } } /* * ext4_setattr() * * Called from notify_change. * * We want to trap VFS attempts to truncate the file as soon as * possible. In particular, we want to make sure that when the VFS * shrinks i_size, we put the inode on the orphan list and modify * i_disksize immediately, so that during the subsequent flushing of * dirty pages and freeing of disk blocks, we can guarantee that any * commit will leave the blocks being flushed in an unused state on * disk. 
(On recovery, the inode will get truncated and the blocks will * be freed, so we have a strong guarantee that no future commit will * leave these blocks visible to the user.) * * Another thing we have to assure is that if we are in ordered mode * and inode is still attached to the committing transaction, we must * we start writeout of all the dirty pages which are being truncated. * This way we are sure that all the data written in the previous * transaction are already on disk (truncate waits for pages under * writeback). * * Called with inode->i_mutex down. */ int ext4_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error, rc = 0; int orphan = 0; const unsigned int ia_valid = attr->ia_valid; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; if (unlikely(IS_APPEND(inode) && (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)))) return -EPERM; error = setattr_prepare(dentry, attr); if (error) return error; error = fscrypt_prepare_setattr(dentry, attr); if (error) return error; error = fsverity_prepare_setattr(dentry, attr); if (error) return error; if (is_quota_modification(inode, attr)) { error = dquot_initialize(inode); if (error) return error; } if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } /* dquot_transfer() calls back ext4_get_inode_usage() which * counts xattr inode references. 
*/ down_read(&EXT4_I(inode)->xattr_sem); error = dquot_transfer(inode, attr); up_read(&EXT4_I(inode)->xattr_sem); if (error) { ext4_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; int shrink = (attr->ia_size < inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; } if (!S_ISREG(inode->i_mode)) return -EINVAL; if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) inode_inc_iversion(inode); if (shrink) { if (ext4_should_order_data(inode)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) goto err_out; } /* * Blocks are going to be removed from the inode. Wait * for dio in flight. */ inode_dio_wait(inode); } down_write(&EXT4_I(inode)->i_mmap_sem); rc = ext4_break_layouts(inode); if (rc) { up_write(&EXT4_I(inode)->i_mmap_sem); return rc; } if (attr->ia_size != inode->i_size) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto out_mmap_sem; } if (ext4_handle_valid(handle) && shrink) { error = ext4_orphan_add(handle, inode); orphan = 1; } /* * Update c/mtime on truncate up, ext4_truncate() will * update c/mtime in shrink case below */ if (!shrink) { inode->i_mtime = current_time(inode); inode->i_ctime = inode->i_mtime; } down_write(&EXT4_I(inode)->i_data_sem); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) error = rc; /* * We have to update i_size under i_data_sem together * with i_disksize to avoid races with writeback code * running ext4_wb_update_i_disksize(). 
*/ if (!error) i_size_write(inode, attr->ia_size); up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) goto out_mmap_sem; if (!shrink) { pagecache_isize_extended(inode, oldsize, inode->i_size); } else if (ext4_should_journal_data(inode)) { ext4_wait_for_tail_page_commit(inode); } } /* * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. */ truncate_pagecache(inode, inode->i_size); /* * Call ext4_truncate() even if i_size didn't change to * truncate possible preallocated blocks. */ if (attr->ia_size <= oldsize) { rc = ext4_truncate(inode); if (rc) error = rc; } out_mmap_sem: up_write(&EXT4_I(inode)->i_mmap_sem); } if (!error) { setattr_copy(inode, attr); mark_inode_dirty(inode); } /* * If the call to ext4_truncate failed to get a transaction handle at * all, we need to clean up the in-core orphan list manually. */ if (orphan && inode->i_nlink) ext4_orphan_del(NULL, inode); if (!error && (ia_valid & ATTR_MODE)) rc = posix_acl_chmod(inode, inode->i_mode); err_out: ext4_std_error(inode->i_sb, error); if (!error) error = rc; return error; } int ext4_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct ext4_inode *raw_inode; struct ext4_inode_info *ei = EXT4_I(inode); unsigned int flags; if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { stat->result_mask |= STATX_BTIME; stat->btime.tv_sec = ei->i_crtime.tv_sec; stat->btime.tv_nsec = ei->i_crtime.tv_nsec; } flags = ei->i_flags & EXT4_FL_USER_VISIBLE; if (flags & EXT4_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; if (flags & EXT4_COMPR_FL) stat->attributes |= STATX_ATTR_COMPRESSED; if (flags & EXT4_ENCRYPT_FL) stat->attributes |= STATX_ATTR_ENCRYPTED; if (flags & EXT4_IMMUTABLE_FL) stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & EXT4_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; stat->attributes_mask |= (STATX_ATTR_APPEND | 
STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP); generic_fillattr(inode, stat); return 0; } int ext4_file_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); u64 delalloc_blocks; ext4_getattr(path, stat, request_mask, query_flags); /* * If there is inline data in the inode, the inode will normally not * have data blocks allocated (it may have an external xattr block). * Report at least one sector for such files, so tools like tar, rsync, * others don't incorrectly think the file is completely sparse. */ if (unlikely(ext4_has_inline_data(inode))) stat->blocks += (stat->size + 511) >> 9; /* * We can't update i_blocks if the block allocation is delayed * otherwise in the case of system crash before the real block * allocation is done, we will have i_blocks inconsistent with * on-disk file blocks. * We always keep i_blocks updated together with real * allocation. But to not confuse with user, stat * will return the blocks that include the delayed allocation * blocks for this file. */ delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), EXT4_I(inode)->i_reserved_data_blocks); stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); return 0; } static int ext4_index_trans_blocks(struct inode *inode, int lblocks, int pextents) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return ext4_ind_trans_blocks(inode, lblocks); return ext4_ext_index_trans_blocks(inode, pextents); } /* * Account for index blocks, block groups bitmaps and block group * descriptor blocks if modify datablocks and index blocks * worse case, the indexs blocks spread over different block groups * * If datablocks are discontiguous, they are possible to spread over * different block groups too. If they are contiguous, with flexbg, * they could still across block group boundary. 
* * Also account for superblock, inode, quota and xattr blocks */ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents) { ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; int idxblocks; int ret = 0; /* * How many index blocks need to touch to map @lblocks logical blocks * to @pextents physical extents? */ idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); ret = idxblocks; /* * Now let's see how many group bitmaps and group descriptors need * to account */ groups = idxblocks + pextents; gdpblocks = groups; if (groups > ngroups) groups = ngroups; if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; /* bitmaps and block group descriptor blocks */ ret += groups + gdpblocks; /* Blocks for super block, inode, quota and xattr blocks */ ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } /* * Calculate the total number of credits to reserve to fit * the modification of a single pages into a single transaction, * which may include multiple chunks of block allocations. * * This could be called via ext4_write_begin() * * We need to consider the worse case, when * one new block per extent. */ int ext4_writepage_trans_blocks(struct inode *inode) { int bpp = ext4_journal_blocks_per_page(inode); int ret; ret = ext4_meta_trans_blocks(inode, bpp, bpp); /* Account for data blocks for journalled mode */ if (ext4_should_journal_data(inode)) ret += bpp; return ret; } /* * Calculate the journal credits for a chunk of data modification. * * This is called from DIO, fallocate or whoever calling * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. * * journal buffers for data blocks are not included here, as DIO * and fallocate do no need to journal data buffers. */ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) { return ext4_meta_trans_blocks(inode, nrblocks, 1); } /* * The caller must have previously called ext4_reserve_inode_write(). 
* Give this, we know that the caller already has write access to iloc->bh. */ int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { int err = 0; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { put_bh(iloc->bh); return -EIO; } if (IS_I_VERSION(inode)) inode_inc_iversion(inode); /* the do_update_inode consumes one bh->b_count */ get_bh(iloc->bh); /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ err = ext4_do_update_inode(handle, inode, iloc); put_bh(iloc->bh); return err; } /* * On success, We end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { int err; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; err = ext4_get_inode_loc(inode, iloc); if (!err) { BUFFER_TRACE(iloc->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, iloc->bh); if (err) { brelse(iloc->bh); iloc->bh = NULL; } } ext4_std_error(inode->i_sb, err); return err; } static int __ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc *iloc, handle_t *handle, int *no_expand) { struct ext4_inode *raw_inode; struct ext4_xattr_ibody_header *header; int error; raw_inode = ext4_raw_inode(iloc); header = IHDR(inode, raw_inode); /* No extended attributes present */ if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize, 0, new_extra_isize - EXT4_I(inode)->i_extra_isize); EXT4_I(inode)->i_extra_isize = new_extra_isize; return 0; } /* try to expand with EAs present */ error = ext4_expand_extra_isize_ea(inode, new_extra_isize, raw_inode, handle); if (error) { /* * Inode size expansion failed; don't try again */ *no_expand = 1; } return error; } /* * Expand an inode by new_extra_isize bytes. 
* Returns 0 on success or negative error number on failure. */ static int ext4_try_to_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc iloc, handle_t *handle) { int no_expand; int error; if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) return -EOVERFLOW; /* * In nojournal mode, we can immediately attempt to expand * the inode. When journaled, we first need to obtain extra * buffer credits since we may write into the EA block * with this same handle. If journal_extend fails, then it will * only result in a minor loss of functionality for that inode. * If this is felt to be critical, then e2fsck should be run to * force a large enough s_min_extra_isize. */ if (ext4_journal_extend(handle, EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0) return -ENOSPC; if (ext4_write_trylock_xattr(inode, &no_expand) == 0) return -EBUSY; error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc, handle, &no_expand); ext4_write_unlock_xattr(inode, &no_expand); return error; } int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc *iloc) { handle_t *handle; int no_expand; int error, rc; if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { brelse(iloc->bh); return -EOVERFLOW; } handle = ext4_journal_start(inode, EXT4_HT_INODE, EXT4_DATA_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { error = PTR_ERR(handle); brelse(iloc->bh); return error; } ext4_write_lock_xattr(inode, &no_expand); BUFFER_TRACE(iloc->bh, "get_write_access"); error = ext4_journal_get_write_access(handle, iloc->bh); if (error) { brelse(iloc->bh); goto out_stop; } error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc, handle, &no_expand); rc = ext4_mark_iloc_dirty(handle, inode, iloc); if (!error) error = rc; ext4_write_unlock_xattr(inode, &no_expand); out_stop: ext4_journal_stop(handle); return error; } /* * What we do here is to mark the in-core inode as clean with respect to inode * dirtiness (it may still be 
data-dirty). * This means that the in-core inode may be reaped by prune_icache * without having to perform any I/O. This is a very good thing, * because *any* task may call prune_icache - even ones which * have a transaction open against a different journal. * * Is this cheating? Not really. Sure, we haven't written the * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. */ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) { struct ext4_iloc iloc; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int err; might_sleep(); trace_ext4_mark_inode_dirty(inode, _RET_IP_); err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) return err; if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize) ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize, iloc, handle); return ext4_mark_iloc_dirty(handle, inode, &iloc); } /* * ext4_dirty_inode() is called from __mark_inode_dirty() * * We're really interested in the case where a file is being extended. * i_size has been changed by generic_commit_write() and we thus need * to include the updated inode in the current transaction. * * Also, dquot_alloc_block() will always dirty the inode when blocks * are allocated to the file. * * If the inode is marked synchronous, we don't honour that here - doing * so would cause a commit on atime updates, which we don't bother doing. * We handle synchronous inodes at the highest possible level. * * If only the I_DIRTY_TIME flag is set, we can skip everything. If * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need * to copy into the on-disk inode structure are the timestamp files. 
*/ void ext4_dirty_inode(struct inode *inode, int flags) { handle_t *handle; if (flags == I_DIRTY_TIME) return; handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) goto out; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return; } int ext4_change_inode_journal_flag(struct inode *inode, int val) { journal_t *journal; handle_t *handle; int err; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); /* * We have to be very careful here: changing a data block's * journaling status dynamically is dangerous. If we write a * data block to the journal, change the status and then delete * that block, we risk forgetting to revoke the old log record * from the journal and so a subsequent replay can corrupt data. * So, first we make sure that the journal is empty and that * nobody is changing anything. */ journal = EXT4_JOURNAL(inode); if (!journal) return 0; if (is_journal_aborted(journal)) return -EROFS; /* Wait for all existing dio workers */ inode_dio_wait(inode); /* * Before flushing the journal and switching inode's aops, we have * to flush all dirty data the inode has. There can be outstanding * delayed allocations, there can be unwritten extents created by * fallocate or buffered writes in dioread_nolock mode covered by * dirty data which can be converted only after flushing the dirty * data (and journalled aops don't know how to handle these cases). */ if (val) { down_write(&EXT4_I(inode)->i_mmap_sem); err = filemap_write_and_wait(inode->i_mapping); if (err < 0) { up_write(&EXT4_I(inode)->i_mmap_sem); return err; } } percpu_down_write(&sbi->s_journal_flag_rwsem); jbd2_journal_lock_updates(journal); /* * OK, there are no updates running now, and all cached data is * synced to disk. We are now in a completely consistent state * which doesn't have anything in the journal, and we know that * no filesystem updates are running, so it is safe to modify * the inode's in-core data-journaling state flag now. 
*/ if (val) ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); else { err = jbd2_journal_flush(journal); if (err < 0) { jbd2_journal_unlock_updates(journal); percpu_up_write(&sbi->s_journal_flag_rwsem); return err; } ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); } ext4_set_aops(inode); jbd2_journal_unlock_updates(journal); percpu_up_write(&sbi->s_journal_flag_rwsem); if (val) up_write(&EXT4_I(inode)->i_mmap_sem); /* Finally we can mark the inode as dirty. */ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); if (IS_ERR(handle)) return PTR_ERR(handle); err = ext4_mark_inode_dirty(handle, inode); ext4_handle_sync(handle); ext4_journal_stop(handle); ext4_std_error(inode->i_sb, err); return err; } static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) { return !buffer_mapped(bh); } vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page = vmf->page; loff_t size; unsigned long len; int err; vm_fault_t ret; struct file *file = vma->vm_file; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; handle_t *handle; get_block_t *get_block; int retries = 0; if (unlikely(IS_IMMUTABLE(inode))) return VM_FAULT_SIGBUS; sb_start_pagefault(inode->i_sb); file_update_time(vma->vm_file); down_read(&EXT4_I(inode)->i_mmap_sem); err = ext4_convert_inline_data(inode); if (err) goto out_ret; /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && !ext4_should_journal_data(inode) && !ext4_nonda_switch(inode->i_sb)) { do { err = block_page_mkwrite(vma, vmf, ext4_da_get_block_prep); } while (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)); goto out_ret; } lock_page(page); size = i_size_read(inode); /* Page got truncated from under us? 
*/ if (page->mapping != mapping || page_offset(page) > size) { unlock_page(page); ret = VM_FAULT_NOPAGE; goto out; } if (page->index == size >> PAGE_SHIFT) len = size & ~PAGE_MASK; else len = PAGE_SIZE; /* * Return if we have all the buffers mapped. This avoids the need to do * journal_start/journal_stop which can block and take a long time */ if (page_has_buffers(page)) { if (!ext4_walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { /* Wait so that we don't change page under IO */ wait_for_stable_page(page); ret = VM_FAULT_LOCKED; goto out; } } unlock_page(page); /* OK, we need to fill the hole... */ if (ext4_should_dioread_nolock(inode)) get_block = ext4_get_block_unwritten; else get_block = ext4_get_block; retry_alloc: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_writepage_trans_blocks(inode)); if (IS_ERR(handle)) { ret = VM_FAULT_SIGBUS; goto out; } err = block_page_mkwrite(vma, vmf, get_block); if (!err && ext4_should_journal_data(inode)) { if (ext4_walk_page_buffers(handle, page_buffers(page), 0, PAGE_SIZE, NULL, do_journal_get_write_access)) { unlock_page(page); ret = VM_FAULT_SIGBUS; ext4_journal_stop(handle); goto out; } ext4_set_inode_state(inode, EXT4_STATE_JDATA); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_alloc; out_ret: ret = block_page_mkwrite_return(err); out: up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(inode->i_sb); return ret; } vm_fault_t ext4_filemap_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); vm_fault_t ret; down_read(&EXT4_I(inode)->i_mmap_sem); ret = filemap_fault(vmf); up_read(&EXT4_I(inode)->i_mmap_sem); return ret; }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_1314_0
crossvul-cpp_data_good_5503_0
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/slab.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
/* Tracing compiled out: make the trace calls vanish. */
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#endif

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;

	if (runtime->silence_size < runtime->boundary) {
		/* Threshold mode: top up silence ahead of the application
		 * pointer only while the un-silenced distance is below
		 * silence_threshold. */
		snd_pcm_sframes_t noise_dist, n;
		if (runtime->silence_start != runtime->control->appl_ptr) {
			/* appl_ptr moved since last fill: frames the app has
			 * written over our silence no longer count as filled. */
			n = runtime->control->appl_ptr - runtime->silence_start;
			if (n < 0)
				n += runtime->boundary;
			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
				runtime->silence_filled -= n;
			else
				runtime->silence_filled = 0;
			runtime->silence_start = runtime->control->appl_ptr;
		}
		if (runtime->silence_filled >= runtime->buffer_size)
			return;
		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
			return;
		frames = runtime->silence_threshold - noise_dist;
		if (frames > runtime->silence_size)
			frames = runtime->silence_size;
	} else {
		/* "Silence everything" mode (silence_size >= boundary):
		 * keep the whole unused part of the buffer silenced. */
		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
			if (avail > runtime->buffer_size)
				avail = runtime->buffer_size;
			runtime->silence_filled = avail > 0 ? avail : 0;
			runtime->silence_start = (runtime->status->hw_ptr +
						  runtime->silence_filled) %
						 runtime->boundary;
		} else {
			/* hw_ptr advanced: the frames it consumed are no
			 * longer silenced. */
			ofs = runtime->status->hw_ptr;
			frames = new_hw_ptr - ofs;
			if ((snd_pcm_sframes_t)frames < 0)
				frames += runtime->boundary;
			runtime->silence_filled -= frames;
			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
				runtime->silence_filled = 0;
				runtime->silence_start = new_hw_ptr;
			} else {
				runtime->silence_start = ofs;
			}
		}
		frames = runtime->buffer_size - runtime->silence_filled;
	}
	if (snd_BUG_ON(frames > runtime->buffer_size))
		return;
	if (frames == 0)
		return;
	ofs = runtime->silence_start % runtime->buffer_size;
	/* Fill `frames` frames of silence, splitting at the ring-buffer
	 * end so each chunk is contiguous in dma_area. */
	while (frames > 0) {
		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
		if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
		    runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) {
			if (substream->ops->silence) {
				int err;
				/* channel -1 = all channels (interleaved) */
				err = substream->ops->silence(substream, -1, ofs, transfer);
				snd_BUG_ON(err < 0);
			} else {
				char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, ofs);
				snd_pcm_format_set_silence(runtime->format, hwbuf, transfer * runtime->channels);
			}
		} else {
			/* non-interleaved: silence each channel area separately */
			unsigned int c;
			unsigned int channels = runtime->channels;
			if (substream->ops->silence) {
				for (c = 0; c < channels; ++c) {
					int err;
					err = substream->ops->silence(substream, c, ofs, transfer);
					snd_BUG_ON(err < 0);
				}
			} else {
				size_t dma_csize = runtime->dma_bytes / channels;
				for (c = 0; c < channels; ++c) {
					char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, ofs);
					snd_pcm_format_set_silence(runtime->format, hwbuf, transfer);
				}
			}
		}
		runtime->silence_filled += transfer;
		frames -= transfer;
		ofs = 0;
	}
}

#ifdef CONFIG_SND_DEBUG
/* Format a human-readable name "pcmC<card>D<dev><c|p>:<substream>" into
 * the caller-provided buffer (always NUL-terminated by snprintf). */
void snd_pcm_debug_name(struct snd_pcm_substream *substream,
			char *name, size_t len)
{
	snprintf(name, len, "pcmC%dD%d%c:%d",
		 substream->pcm->card->number,
		 substream->pcm->device,
		 substream->stream ?
		 'c' : 'p', substream->number);
}
EXPORT_SYMBOL(snd_pcm_debug_name);
#endif

#define XRUN_DEBUG_BASIC	(1<<0)
#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define xrun_debug(substream, mask) \
			((substream)->pstr->xrun_debug & (mask))
#else
#define xrun_debug(substream, mask)	0
#endif

#define dump_stack_on_xrun(substream) do {			\
		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
			dump_stack();				\
	} while (0)

/*
 * Handle an under/overrun: timestamp it, stop the stream in XRUN state
 * and optionally log/dump-stack when xrun debugging is enabled.
 */
static void xrun(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	trace_xrun(substream);
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
		char name[16];
		snd_pcm_debug_name(substream, name, sizeof(name));
		pcm_warn(substream->pcm, "XRUN: %s\n", name);
		dump_stack_on_xrun(substream);
	}
}

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
/* 'Q' marks an interrupt-context update, 'P' an application-side one. */
#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
	do {								\
		trace_hw_ptr_error(substream, reason);	\
		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt,	\
					   (in_interrupt) ? 'Q' : 'P', ##args);	\
			dump_stack_on_xrun(substream);			\
		}							\
	} while (0)
#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
#define hw_ptr_error(substream, fmt, args...) do { } while (0)
#endif

/*
 * Re-evaluate stream state after the hw pointer moved: track avail_max,
 * finish draining or signal an xrun, and wake up waiters.
 *
 * Returns 0 normally, or -EPIPE when the stream was stopped (drain done
 * or xrun).
 */
int snd_pcm_update_state(struct snd_pcm_substream *substream,
			 struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t avail;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		avail = snd_pcm_playback_avail(runtime);
	else
		avail = snd_pcm_capture_avail(runtime);
	if (avail > runtime->avail_max)
		runtime->avail_max = avail;
	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
		if (avail >= runtime->buffer_size) {
			snd_pcm_drain_done(substream);
			return -EPIPE;
		}
	} else {
		if (avail >= runtime->stop_threshold) {
			xrun(substream);
			return -EPIPE;
		}
	}
	if (runtime->twake) {
		/* twake set: a transfer-sleep waiter takes priority */
		if (avail >= runtime->twake)
			wake_up(&runtime->tsleep);
	} else if (avail >= runtime->control->avail_min)
		wake_up(&runtime->sleep);
	return 0;
}

/*
 * Publish status->tstamp / status->audio_tstamp.  When the driver offers
 * no get_time_info (or reported the DEFAULT type), derive the audio
 * timestamp from the absolute frame count (hw_ptr_wrap + hw_ptr),
 * optionally adjusted by the driver-reported delay.
 */
static void update_audio_tstamp(struct snd_pcm_substream *substream,
				struct timespec *curr_tstamp,
				struct timespec *audio_tstamp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	u64 audio_frames, audio_nsecs;
	struct timespec driver_tstamp;

	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
		return;

	if (!(substream->ops->get_time_info) ||
		(runtime->audio_tstamp_report.actual_type ==
			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {

		/*
		 * provide audio timestamp derived from pointer position
		 * add delay only if requested
		 */

		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;

		if (runtime->audio_tstamp_config.report_delay) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				audio_frames -=  runtime->delay;
			else
				audio_frames +=  runtime->delay;
		}
		audio_nsecs = div_u64(audio_frames * 1000000000LL,
				runtime->rate);
		*audio_tstamp = ns_to_timespec(audio_nsecs);
	}
	runtime->status->audio_tstamp = *audio_tstamp;
	runtime->status->tstamp = *curr_tstamp;

	/*
	 * re-take a driver timestamp to let apps detect if the reference tstamp
	 * read by low-level hardware was provided with a delay
	 */
	snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
	runtime->driver_tstamp = driver_tstamp;
}

static int snd_pcm_update_hw_ptr0(struct
				  snd_pcm_substream *substream,
				  unsigned int in_interrupt)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t pos;
	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
	snd_pcm_sframes_t hdelta, delta;
	unsigned long jdelta;
	unsigned long curr_jiffies;
	struct timespec curr_tstamp;
	struct timespec audio_tstamp;
	int crossed_boundary = 0;

	old_hw_ptr = runtime->status->hw_ptr;

	/*
	 * group pointer, time and jiffies reads to allow for more
	 * accurate correlations/corrections.
	 * The values are stored at the end of this routine after
	 * corrections for hw_ptr position
	 */
	pos = substream->ops->pointer(substream);
	curr_jiffies = jiffies;
	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
		if ((substream->ops->get_time_info) &&
			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
			substream->ops->get_time_info(substream, &curr_tstamp,
						&audio_tstamp,
						&runtime->audio_tstamp_config,
						&runtime->audio_tstamp_report);

			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
				snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
		} else
			snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
	}

	if (pos == SNDRV_PCM_POS_XRUN) {
		xrun(substream);
		return -EPIPE;
	}
	if (pos >= runtime->buffer_size) {
		/* driver bug: pointer callback returned out-of-range value */
		if (printk_ratelimit()) {
			char name[16];
			snd_pcm_debug_name(substream, name, sizeof(name));
			pcm_err(substream->pcm,
				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
				name, pos, runtime->buffer_size,
				runtime->period_size);
		}
		pos = 0;
	}
	pos -= pos % runtime->min_align;
	trace_hwptr(substream, pos, in_interrupt);
	hw_base = runtime->hw_ptr_base;
	new_hw_ptr = hw_base + pos;
	if (in_interrupt) {
		/* we know that one period was processed */
		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
		delta = runtime->hw_ptr_interrupt + runtime->period_size;
		if (delta > new_hw_ptr) {
			/* check for double acknowledged interrupts */
			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
				/* enough time passed: the pointer must have
				 * wrapped a whole buffer */
				hw_base += runtime->buffer_size;
				if (hw_base >= runtime->boundary) {
					hw_base = 0;
					crossed_boundary++;
				}
				new_hw_ptr = hw_base + pos;
				goto __delta;
			}
		}
	}
	/* new_hw_ptr might be lower than old_hw_ptr in case when */
	/* pointer crosses the end of the ring buffer */
	if (new_hw_ptr < old_hw_ptr) {
		hw_base += runtime->buffer_size;
		if (hw_base >= runtime->boundary) {
			hw_base = 0;
			crossed_boundary++;
		}
		new_hw_ptr = hw_base + pos;
	}
      __delta:
	delta = new_hw_ptr - old_hw_ptr;
	if (delta < 0)
		delta += runtime->boundary;

	if (runtime->no_period_wakeup) {
		snd_pcm_sframes_t xrun_threshold;
		/*
		 * Without regular period interrupts, we have to check
		 * the elapsed time to detect xruns.
		 */
		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
			goto no_delta_check;
		hdelta = jdelta - delta * HZ / runtime->rate;
		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
		while (hdelta > xrun_threshold) {
			/* pointer wrapped at least one more full buffer */
			delta += runtime->buffer_size;
			hw_base += runtime->buffer_size;
			if (hw_base >= runtime->boundary) {
				hw_base = 0;
				crossed_boundary++;
			}
			new_hw_ptr = hw_base + pos;
			hdelta -= runtime->hw_ptr_buffer_jiffies;
		}
		goto no_delta_check;
	}

	/* something must be really wrong */
	if (delta >= runtime->buffer_size + runtime->period_size) {
		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)pos,
			     (long)new_hw_ptr, (long)old_hw_ptr);
		return 0;
	}

	/* Do jiffies check only in xrun_debug mode */
	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
		goto no_jiffies_check;

	/* Skip the jiffies check for hardwares with BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
		goto no_jiffies_check;
	hdelta = delta;
	if (hdelta < runtime->delay)
		goto no_jiffies_check;
	hdelta -= runtime->delay;
	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
		/* pointer advanced faster than wall-clock time allows:
		 * distrust the driver and re-derive the position from
		 * elapsed jiffies instead */
		delta = jdelta /
			(((runtime->period_size * HZ) / runtime->rate)
								+ HZ/100);
		/* move new_hw_ptr according jiffies not pos variable */
		new_hw_ptr = old_hw_ptr;
		hw_base = delta;
		/* use loop to avoid checks for delta overflows */
		/* the delta value is small or zero in most cases */
		while (delta > 0) {
			new_hw_ptr += runtime->period_size;
			if (new_hw_ptr >= runtime->boundary) {
				new_hw_ptr -= runtime->boundary;
				crossed_boundary--;
			}
			delta--;
		}
		/* align hw_base to buffer_size */
		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
			     (long)pos, (long)hdelta, (long)runtime->period_size,
			     jdelta, ((hdelta * HZ) / runtime->rate),
			     hw_base, (unsigned long)old_hw_ptr,
			     (unsigned long)new_hw_ptr);
		/* reset values to proper state */
		delta = 0;
		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
	}
 no_jiffies_check:
	if (delta > runtime->period_size + runtime->period_size / 2) {
		hw_ptr_error(substream, in_interrupt,
			     "Lost interrupts?",
			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
			     substream->stream, (long)delta,
			     (long)new_hw_ptr, (long)old_hw_ptr);
	}

 no_delta_check:
	if (runtime->status->hw_ptr == new_hw_ptr) {
		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
		return 0;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, new_hw_ptr);

	if (in_interrupt) {
		/* advance the per-period interrupt reference by whole periods */
		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
		if (delta < 0)
			delta += runtime->boundary;
		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
		runtime->hw_ptr_interrupt += delta;
		if (runtime->hw_ptr_interrupt >= runtime->boundary)
			runtime->hw_ptr_interrupt -= runtime->boundary;
	}
	runtime->hw_ptr_base = hw_base;
	runtime->status->hw_ptr = new_hw_ptr;
	runtime->hw_ptr_jiffies = curr_jiffies;
	if (crossed_boundary) {
		snd_BUG_ON(crossed_boundary != 1);
		runtime->hw_ptr_wrap += runtime->boundary;
	}

	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);

	return snd_pcm_update_state(substream, runtime);
}

/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	return snd_pcm_update_hw_ptr0(substream, 0);
}

/**
 * snd_pcm_set_ops - set the PCM operators
 * @pcm: the pcm instance
 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 * @ops: the operator table
 *
 * Sets the given PCM operators to the pcm instance.
 */
void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
		     const struct snd_pcm_ops *ops)
{
	struct snd_pcm_str *stream = &pcm->streams[direction];
	struct snd_pcm_substream *substream;

	/* apply the same ops table to every substream of this direction */
	for (substream = stream->substream; substream != NULL; substream = substream->next)
		substream->ops = ops;
}
EXPORT_SYMBOL(snd_pcm_set_ops);

/**
 * snd_pcm_sync - set the PCM sync id
 * @substream: the pcm substream
 *
 * Sets the PCM sync identifier for the card.
*/ void snd_pcm_set_sync(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; runtime->sync.id32[0] = substream->pcm->card->number; runtime->sync.id32[1] = -1; runtime->sync.id32[2] = -1; runtime->sync.id32[3] = -1; } EXPORT_SYMBOL(snd_pcm_set_sync); /* * Standard ioctl routine */ static inline unsigned int div32(unsigned int a, unsigned int b, unsigned int *r) { if (b == 0) { *r = 0; return UINT_MAX; } *r = a % b; return a / b; } static inline unsigned int div_down(unsigned int a, unsigned int b) { if (b == 0) return UINT_MAX; return a / b; } static inline unsigned int div_up(unsigned int a, unsigned int b) { unsigned int r; unsigned int q; if (b == 0) return UINT_MAX; q = div32(a, b, &r); if (r) ++q; return q; } static inline unsigned int mul(unsigned int a, unsigned int b) { if (a == 0) return 0; if (div_down(UINT_MAX, a) < b) return UINT_MAX; return a * b; } static inline unsigned int muldiv32(unsigned int a, unsigned int b, unsigned int c, unsigned int *r) { u_int64_t n = (u_int64_t) a * b; if (c == 0) { snd_BUG_ON(!n); *r = 0; return UINT_MAX; } n = div_u64_rem(n, c, r); if (n >= UINT_MAX) { *r = 0; return UINT_MAX; } return n; } /** * snd_interval_refine - refine the interval value of configurator * @i: the interval value to refine * @v: the interval value to refer to * * Refines the interval value with the reference value. * The interval is changed to the range satisfying both intervals. * The interval status (min, max, integer, etc.) are evaluated. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
 */
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
{
	int changed = 0;
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	/* intersect the lower bounds (openmin marks an exclusive bound) */
	if (i->min < v->min) {
		i->min = v->min;
		i->openmin = v->openmin;
		changed = 1;
	} else if (i->min == v->min && !i->openmin && v->openmin) {
		i->openmin = 1;
		changed = 1;
	}
	/* intersect the upper bounds */
	if (i->max > v->max) {
		i->max = v->max;
		i->openmax = v->openmax;
		changed = 1;
	} else if (i->max == v->max && !i->openmax && v->openmax) {
		i->openmax = 1;
		changed = 1;
	}
	if (!i->integer && v->integer) {
		i->integer = 1;
		changed = 1;
	}
	if (i->integer) {
		/* normalize open bounds to closed integer bounds */
		if (i->openmin) {
			i->min++;
			i->openmin = 0;
		}
		if (i->openmax) {
			i->max--;
			i->openmax = 0;
		}
	} else if (!i->openmin && !i->openmax && i->min == i->max)
		i->integer = 1;
	if (snd_interval_checkempty(i)) {
		snd_interval_none(i);
		return -EINVAL;
	}
	return changed;
}
EXPORT_SYMBOL(snd_interval_refine);

/* Collapse the interval to its minimum value.  Returns 1 when changed,
 * 0 when already a single value, -EINVAL on an empty interval. */
static int snd_interval_refine_first(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->max = i->min;
	i->openmax = i->openmin;
	if (i->openmax)
		i->max++;
	return 1;
}

/* Collapse the interval to its maximum value (mirror of _refine_first). */
static int snd_interval_refine_last(struct snd_interval *i)
{
	if (snd_BUG_ON(snd_interval_empty(i)))
		return -EINVAL;
	if (snd_interval_single(i))
		return 0;
	i->min = i->max;
	i->openmin = i->openmax;
	if (i->openmin)
		i->min--;
	return 1;
}

/* c = a * b (interval arithmetic, products clamped via mul()) */
void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = mul(a->min, b->min);
	c->openmin = (a->openmin || b->openmin);
	c->max = mul(a->max,  b->max);
	c->openmax = (a->openmax || b->openmax);
	c->integer = (a->integer && b->integer);
}

/**
 * snd_interval_div - refine the interval value with division
 * @a: dividend
 * @b: divisor
 * @c: quotient
 *
 * c = a / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	/* smallest quotient: min dividend over max divisor */
	c->min = div32(a->min, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = div32(a->max, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		/* divisor range reaches 0: quotient is unbounded above */
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/**
 * snd_interval_muldivk - refine the interval value
 * @a: dividend 1
 * @b: dividend 2
 * @k: divisor (as integer)
 * @c: result
 *
 * c = a * b / k
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
		      unsigned int k, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	c->min = muldiv32(a->min, b->min, k, &r);
	c->openmin = (r || a->openmin || b->openmin);
	c->max = muldiv32(a->max, b->max, k, &r);
	if (r) {
		c->max++;
		c->openmax = 1;
	} else
		c->openmax = (a->openmax || b->openmax);
	c->integer = 0;
}

/**
 * snd_interval_mulkdiv - refine the interval value
 * @a: dividend 1
 * @k: dividend 2 (as integer)
 * @b: divisor
 * @c: result
 *
 * c = a * k / b
 *
 * Returns non-zero if the value is changed, zero if not changed.
 */
void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
		      const struct snd_interval *b, struct snd_interval *c)
{
	unsigned int r;
	if (a->empty || b->empty) {
		snd_interval_none(c);
		return;
	}
	c->empty = 0;
	/* smallest result: min dividend scaled by k over max divisor */
	c->min = muldiv32(a->min, k, b->max, &r);
	c->openmin = (r || a->openmin || b->openmax);
	if (b->min > 0) {
		c->max = muldiv32(a->max, k, b->min, &r);
		if (r) {
			c->max++;
			c->openmax = 1;
		} else
			c->openmax = (a->openmax || b->openmin);
	} else {
		/* divisor range reaches 0: result is unbounded above */
		c->max = UINT_MAX;
		c->openmax = 0;
	}
	c->integer = 0;
}

/* ---- */


/**
 * snd_interval_ratnum - refine the interval value
 * @i: interval to refine
 * @rats_count: number of ratnum_t
 * @rats: ratnum_t array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ratnum(struct snd_interval *i,
			unsigned int rats_count, const struct snd_ratnum *rats,
			unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_den;
	int best_diff;
	unsigned int k;
	struct snd_interval t;
	int err;
	unsigned int result_num, result_den;
	int result_diff;

	/* First pass: for each num/den family find the representable rate
	 * closest to i->min (den clamped and snapped to den_step). */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->min;
		int diff;
		if (q == 0)
			q = 1;
		den = div_up(num, q);
		if (den < rats[k].den_min)
			continue;
		if (den > rats[k].den_max)
			den = rats[k].den_max;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den -= r;
		}
		diff = num - q * den;
		if (diff < 0)
			diff = -diff;
		/* compare diff/den fractions without division:
		 * diff1/den1 < diff2/den2  <=>  diff1*den2 < diff2*den1 */
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	result_num = best_num;
	result_diff = best_diff;
	result_den = best_den;
	/* Second pass: same search against i->max. */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num = rats[k].num;
		unsigned int den;
		unsigned int q = i->max;
		int diff;
		if (q == 0) {
			i->empty = 1;
			return -EINVAL;
		}
		den = div_down(num, q);
		if (den > rats[k].den_max)
			continue;
		if (den < rats[k].den_min)
			den = rats[k].den_min;
		else {
			unsigned int r;
			r = (den - rats[k].den_min) % rats[k].den_step;
			if (r != 0)
				den += rats[k].den_step - r;
		}
		diff = q * den - num;
		if (diff < 0)
			diff = -diff;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		/* interval collapsed: report the better of the two candidates */
		if (best_diff * result_den < result_diff * best_den) {
			result_num = best_num;
			result_den = best_den;
		}
		if (nump)
			*nump = result_num;
		if (denp)
			*denp = result_den;
	}
	return err;
}
EXPORT_SYMBOL(snd_interval_ratnum);

/**
 * snd_interval_ratden - refine the interval value
 * @i: interval to refine
 * @rats_count: number of struct ratden
 * @rats: struct ratden array
 * @nump: pointer to store the resultant numerator
 * @denp: pointer to store the resultant denominator
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
static int snd_interval_ratden(struct snd_interval *i,
			       unsigned int rats_count,
			       const struct snd_ratden *rats,
			       unsigned int *nump, unsigned int *denp)
{
	unsigned int best_num, best_diff, best_den;
	unsigned int k;
	struct snd_interval t;
	int err;

	/* First pass: closest representable numerator to i->min * den
	 * (num snapped up to num_step within [num_min, num_max]). */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->min;
		int diff;
		num = mul(q, den);
		if (num > rats[k].num_max)
			continue;
		if (num < rats[k].num_min)
			/* NOTE(review): clamping a too-small product up to
			 * num_max (not num_min) looks suspicious — verify
			 * against upstream whether num_min was intended. */
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num += rats[k].num_step - r;
		}
		diff = num - q * den;
		/* cross-multiplied fraction comparison, as in ratnum() */
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.min = div_down(best_num, best_den);
	t.openmin = !!(best_num % best_den);

	/* Second pass: same search against i->max, snapping num down. */
	best_num = best_den = best_diff = 0;
	for (k = 0; k < rats_count; ++k) {
		unsigned int num;
		unsigned int den = rats[k].den;
		unsigned int q = i->max;
		int diff;
		num = mul(q, den);
		if (num < rats[k].num_min)
			continue;
		if (num > rats[k].num_max)
			num = rats[k].num_max;
		else {
			unsigned int r;
			r = (num - rats[k].num_min) % rats[k].num_step;
			if (r != 0)
				num -= r;
		}
		diff = q * den - num;
		if (best_num == 0 ||
		    diff * best_den < best_diff * den) {
			best_diff = diff;
			best_den = den;
			best_num = num;
		}
	}
	if (best_den == 0) {
		i->empty = 1;
		return -EINVAL;
	}
	t.max = div_up(best_num, best_den);
	t.openmax = !!(best_num % best_den);
	t.integer = 0;
	err = snd_interval_refine(i, &t);
	if (err < 0)
		return err;

	if (snd_interval_single(i)) {
		if (nump)
			*nump = best_num;
		if (denp)
			*denp = best_den;
	}
	return err;
}

/**
 * snd_interval_list - refine the interval value from the list
 * @i: the interval value to refine
 * @count: the number of elements in the list
 * @list: the value list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_list(struct snd_interval *i, unsigned int count,
		      const unsigned int *list, unsigned int mask)
{
	unsigned int k;
	struct snd_interval list_range;

	if (!count) {
		i->empty = 1;
		return -EINVAL;
	}
	snd_interval_any(&list_range);
	list_range.min = UINT_MAX;
	list_range.max = 0;
	/* build the tightest closed range covering the admissible values */
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		if (!snd_interval_test(i, list[k]))
			continue;
		list_range.min = min(list_range.min, list[k]);
		list_range.max = max(list_range.max, list[k]);
	}
	return snd_interval_refine(i, &list_range);
}
EXPORT_SYMBOL(snd_interval_list);

/**
 * snd_interval_ranges - refine the interval value from the list of ranges
 * @i: the interval value to refine
 * @count: the number of elements in the list of ranges
 * @ranges: the ranges list
 * @mask: the bit-mask to evaluate
 *
 * Refines the interval value from the list of ranges.
 * When mask is non-zero, only the elements corresponding to bit 1 are
 * evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
			const struct snd_interval *ranges, unsigned int mask)
{
	unsigned int k;
	struct snd_interval range_union;
	struct snd_interval range;

	if (!count) {
		snd_interval_none(i);
		return -EINVAL;
	}
	snd_interval_any(&range_union);
	range_union.min = UINT_MAX;
	range_union.max = 0;
	/* union of all admissible (mask-selected, non-empty) sub-ranges */
	for (k = 0; k < count; k++) {
		if (mask && !(mask & (1 << k)))
			continue;
		snd_interval_copy(&range, &ranges[k]);
		if (snd_interval_refine(&range, i) < 0)
			continue;
		if (snd_interval_empty(&range))
			continue;

		if (range.min < range_union.min) {
			range_union.min = range.min;
			range_union.openmin = 1;
		}
		/* a closed bound at the same value wins over an open one */
		if (range.min == range_union.min && !range.openmin)
			range_union.openmin = 0;
		if (range.max > range_union.max) {
			range_union.max = range.max;
			range_union.openmax = 1;
		}
		if (range.max == range_union.max && !range.openmax)
			range_union.openmax = 0;
	}
	return snd_interval_refine(i, &range_union);
}
EXPORT_SYMBOL(snd_interval_ranges);

/* Restrict the interval to multiples of `step` (min rounded up, max
 * rounded down).  Returns 1 when changed, 0 when not, -EINVAL when the
 * interval becomes empty. */
static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
	unsigned int n;
	int changed = 0;
	n = i->min % step;
	if (n != 0 || i->openmin) {
		i->min += step - n;
		i->openmin = 0;
		changed = 1;
	}
	n = i->max % step;
	if (n != 0 || i->openmax) {
		i->max -= n;
		i->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(i)) {
		i->empty = 1;
		return -EINVAL;
	}
	return changed;
}

/* Info constraints helpers */

/**
 * snd_pcm_hw_rule_add - add the hw-constraint rule
 * @runtime: the pcm runtime instance
 * @cond: condition bits
 * @var: the variable to evaluate
 * @func: the evaluation function
 * @private: the private data pointer passed to function
 * @dep: the dependent variables
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
			int var,
			snd_pcm_hw_rule_func_t func, void *private,
			int dep, ...)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_pcm_hw_rule *c;
	unsigned int k;
	va_list args;
	va_start(args, dep);
	if (constrs->rules_num >= constrs->rules_all) {
		/* grow the rule array in chunks of 16 */
		struct snd_pcm_hw_rule *new;
		unsigned int new_rules = constrs->rules_all + 16;
		new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL);
		if (!new) {
			va_end(args);
			return -ENOMEM;
		}
		if (constrs->rules) {
			memcpy(new, constrs->rules,
			       constrs->rules_num * sizeof(*c));
			kfree(constrs->rules);
		}
		constrs->rules = new;
		constrs->rules_all = new_rules;
	}
	c = &constrs->rules[constrs->rules_num];
	c->cond = cond;
	c->func = func;
	c->var = var;
	c->private = private;
	k = 0;
	/* copy the variadic dependency list; a negative value terminates it */
	while (1) {
		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
			va_end(args);
			return -EINVAL;
		}
		c->deps[k++] = dep;
		if (dep < 0)
			break;
		dep = va_arg(args, int);
	}
	constrs->rules_num++;
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_rule_add);

/**
 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
			       u_int32_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	*maskp->bits &= mask;
	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
	if (*maskp->bits == 0)
		return -EINVAL;
	return 0;
}

/**
 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the mask
 * @mask: the 64bit bitmap mask
 *
 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 u_int64_t mask)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_mask *maskp = constrs_mask(constrs, var);
	/* split the 64-bit mask over the first two 32-bit words */
	maskp->bits[0] &= (u_int32_t)mask;
	maskp->bits[1] &= (u_int32_t)(mask >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);

/**
 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the integer constraint
 *
 * Apply the constraint of integer to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	return snd_interval_setinteger(constrs_interval(constrs, var));
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);

/**
 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
 * @runtime: PCM runtime instance
 * @var: hw_params variable to apply the range
 * @min: the minimal value
 * @max: the maximal value
 *
 * Apply the min/max range constraint to an interval parameter.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
 */
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
				 unsigned int min, unsigned int max)
{
	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
	struct snd_interval t;
	t.min = min;
	t.max = max;
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(constrs_interval(constrs, var), &t);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);

/* Rule callback: apply a value-list constraint stored in rule->private. */
static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_list *list = rule->private;
	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
}

/**
 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list constraint
 * @l: list
 *
 * Apply the list of constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       const struct snd_pcm_hw_constraint_list *l)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_list, (void *)l,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);

/* Rule callback: apply a range-list constraint stored in rule->private. */
static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hw_constraint_ranges *r = rule->private;
	return snd_interval_ranges(hw_param_interval(params, rule->var),
				   r->count, r->ranges, r->mask);
}

/**
 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the list of range constraints
 * @r: ranges
 *
 * Apply the list of range constraints to an interval parameter.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 snd_pcm_hw_param_t var,
				 const struct snd_pcm_hw_constraint_ranges *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ranges, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);

/* Rule callback: refine the interval against a rational-number (num/den) set. */
static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
	unsigned int num = 0, den = 0;
	int err;
	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
				  r->nrats, r->rats, &num, &den);
	/* remember the exact rate fraction when the rate parameter was refined */
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratnums constraint
 * @r: struct snd_ratnums constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratnums *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratnums, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);

/* Rule callback: refine the interval against a rational-denominator set. */
static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
				   struct snd_pcm_hw_rule *rule)
{
	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
	unsigned int num = 0, den = 0;
	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
				      r->nrats, r->rats, &num, &den);
	/* remember the exact rate fraction when the rate parameter was refined */
	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}

/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
				  unsigned int cond,
				  snd_pcm_hw_param_t var,
				  const struct snd_pcm_hw_constraint_ratdens *r)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_ratdens, (void *)r,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);

/*
 * Rule callback: set params->msbits when the (single-valued) sample width
 * matches the width packed into rule->private, or when width==0 and the
 * current sample width exceeds msbits.
 */
static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int l = (unsigned long) rule->private;
	int width = l & 0xffff;		/* sample width, low 16 bits */
	unsigned int msbits = l >> 16;	/* msbits, high 16 bits */
	struct snd_interval *i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);

	if (!snd_interval_single(i))
		return 0;	/* sample width not settled yet: nothing to do */

	if ((snd_interval_value(i) == width) ||
	    (width == 0 && snd_interval_value(i) > msbits))
		params->msbits = min_not_zero(params->msbits, msbits);

	return 0;
}

/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been select. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 * specified msbits.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
				 unsigned int cond,
				 unsigned int width,
				 unsigned int msbits)
{
	/* pack msbits and width into one pointer-sized private value */
	unsigned long l = (msbits << 16) | width;
	return snd_pcm_hw_rule_add(runtime, cond, -1,
				    snd_pcm_hw_rule_msbits,
				    (void*) l,
				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);

/* Rule callback: constrain the interval to multiples of the given step. */
static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	unsigned long step = (unsigned long) rule->private;
	return snd_interval_step(hw_param_interval(params, rule->var), step);
}

/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 * @step: step size
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var,
			       unsigned long step)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_step, (void *) step,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);

/* Rule callback: restrict the interval to powers of two (2^0 .. 2^30). */
static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
	static unsigned int pow2_sizes[] = {
		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
	};
	return snd_interval_list(hw_param_interval(params, rule->var),
				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
}

/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
			       unsigned int cond,
			       snd_pcm_hw_param_t var)
{
	return snd_pcm_hw_rule_add(runtime, cond, var,
				   snd_pcm_hw_rule_pow2, NULL,
				   var, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);

/* Rule callback: force the rate interval to the single no-resample base rate. */
static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
					   struct snd_pcm_hw_rule *rule)
{
	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
	struct snd_interval *rate;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	return snd_interval_list(rate, 1, &base_rate, 0);
}

/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
			       unsigned int base_rate)
{
	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
				   SNDRV_PCM_HW_PARAM_RATE,
				   snd_pcm_hw_rule_noresample_func,
				   (void *)(uintptr_t)base_rate,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);

/* Reset one parameter (mask or interval) to "any value" and mark it changed. */
static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_any(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	if (hw_is_interval(var)) {
		snd_interval_any(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
		return;
	}
	snd_BUG();	/* var is neither a mask nor an interval */
}

/* Initialize the whole configuration space to the fully-open state. */
void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
{
	unsigned int k;
	memset(params, 0, sizeof(*params));
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
		_snd_pcm_hw_param_any(params, k);
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		_snd_pcm_hw_param_any(params, k);
	params->info = ~0U;
}
EXPORT_SYMBOL(_snd_pcm_hw_params_any);

/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params
 instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	if (hw_is_mask(var)) {
		const struct snd_mask *mask = hw_param_mask_c(params, var);
		if (!snd_mask_single(mask))
			return -EINVAL;	/* more than one value still possible */
		if (dir)
			*dir = 0;
		return snd_mask_value(mask);
	}
	if (hw_is_interval(var)) {
		const struct snd_interval *i = hw_param_interval_c(params, var);
		if (!snd_interval_single(i))
			return -EINVAL;	/* interval not collapsed to one value */
		if (dir)
			*dir = i->openmin;
		return snd_interval_value(i);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(snd_pcm_hw_param_value);

/* Empty one parameter (no valid value) and mark it changed. */
void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var)
{
	if (hw_is_mask(var)) {
		snd_mask_none(hw_param_mask(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else if (hw_is_interval(var)) {
		snd_interval_none(hw_param_interval(params, var));
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	} else {
		snd_BUG();
	}
}
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);

/* Collapse one parameter to its minimum value; flag it changed if refined. */
static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
				   snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_first(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_first(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
			   struct snd_pcm_hw_params *params,
			   snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_first(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		/* something changed: re-run the dependent refinement rules */
		int err = snd_pcm_hw_refine(pcm, params);
		if (snd_BUG_ON(err < 0))
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_first);

/* Collapse one parameter to its maximum value; flag it changed if refined. */
static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
				  snd_pcm_hw_param_t var)
{
	int changed;
	if (hw_is_mask(var))
		changed = snd_mask_refine_last(hw_param_mask(params, var));
	else if (hw_is_interval(var))
		changed = snd_interval_refine_last(hw_param_interval(params, var));
	else
		return -EINVAL;
	if (changed) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
			  struct snd_pcm_hw_params *params,
			  snd_pcm_hw_param_t var, int *dir)
{
	int changed = _snd_pcm_hw_param_last(params, var);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		/* something changed: re-run the dependent refinement rules */
		int err = snd_pcm_hw_refine(pcm, params);
		if (snd_BUG_ON(err < 0))
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);
}
EXPORT_SYMBOL(snd_pcm_hw_param_last);

/**
 * snd_pcm_hw_param_choose - choose a configuration defined by @params
 * @pcm: PCM instance
 * @params: the hw_params instance
 *
 * Choose one configuration from configuration space defined by @params.
 * The configuration chosen is that obtained fixing in this order:
 * first access, first format, first subformat, min channels,
 * min rate, min period time, max buffer size, min tick time
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
			     struct snd_pcm_hw_params *params)
{
	static int vars[] = {
		SNDRV_PCM_HW_PARAM_ACCESS,
		SNDRV_PCM_HW_PARAM_FORMAT,
		SNDRV_PCM_HW_PARAM_SUBFORMAT,
		SNDRV_PCM_HW_PARAM_CHANNELS,
		SNDRV_PCM_HW_PARAM_RATE,
		SNDRV_PCM_HW_PARAM_PERIOD_TIME,
		SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
		SNDRV_PCM_HW_PARAM_TICK_TIME,
		-1
	};
	int err, *v;

	for (v = vars; *v != -1; v++) {
		/* buffer size is maximized, everything else minimized */
		if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
			err = snd_pcm_hw_param_first(pcm, params, *v, NULL);
		else
			err = snd_pcm_hw_param_last(pcm, params, *v, NULL);
		if (snd_BUG_ON(err < 0))
			return err;
	}
	return 0;
}

/* IOCTL1_RESET handler: resync or reset the hardware pointer under the lock. */
static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
				   void *arg)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream) &&
	    snd_pcm_update_hw_ptr(substream) >= 0)
		runtime->status->hw_ptr %= runtime->buffer_size;
	else {
		runtime->status->hw_ptr = 0;
		runtime->hw_ptr_wrap = 0;
	}
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return 0;
}

/* IOCTL1_CHANNEL_INFO handler: report per-channel mmap layout. */
static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
					  void *arg)
{
	struct snd_pcm_channel_info *info = arg;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int width;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
		info->offset = -1;	/* stream is not mmap-capable */
		return 0;
	}
	width = snd_pcm_format_physical_width(runtime->format);
	if (width < 0)
		return width;
	info->offset = 0;
	switch (runtime->access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		/* interleaved: samples of a frame are adjacent */
		info->first = info->channel * width;
		info->step = runtime->channels * width;
		break;
	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
	{
		size_t size =
			runtime->dma_bytes / runtime->channels;
		/* non-interleaved: each channel owns a contiguous sub-buffer */
		info->first = info->channel * size * 8;
		info->step = width;
		break;
	}
	default:
		snd_BUG();
		break;
	}
	return 0;
}

/* IOCTL1_FIFO_SIZE handler: report FIFO size, converted to frames if needed. */
static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
				       void *arg)
{
	struct snd_pcm_hw_params *params = arg;
	snd_pcm_format_t format;
	int channels;
	ssize_t frame_size;

	params->fifo_size = substream->runtime->hw.fifo_size;
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
		/* hardware reports bytes: convert to frames */
		format = params_format(params);
		channels = params_channels(params);
		frame_size = snd_pcm_format_size(format, channels);
		if (frame_size > 0)
			params->fifo_size /= (unsigned)frame_size;
	}
	return 0;
}

/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
		      unsigned int cmd, void *arg)
{
	switch (cmd) {
	case SNDRV_PCM_IOCTL1_INFO:
		return 0;
	case SNDRV_PCM_IOCTL1_RESET:
		return snd_pcm_lib_ioctl_reset(substream, arg);
	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
		return snd_pcm_lib_ioctl_channel_info(substream, arg);
	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
	}
	return -ENXIO;
}
EXPORT_SYMBOL(snd_pcm_lib_ioctl);

/**
 * snd_pcm_period_elapsed - update the pcm status for the next period
 * @substream: the pcm substream instance
 *
 * This function is called from the interrupt handler when the
 * PCM has processed the period size. It will update the current
 * pointer, wake up sleepers, etc.
 *
 * Even if more than one periods have elapsed since the last call, you
 * have to call this only once.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	unsigned long flags;

	if (PCM_RUNTIME_CHECK(substream))
		return;
	runtime = substream->runtime;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (!snd_pcm_running(substream) ||
	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
		goto _end;

#ifdef CONFIG_SND_PCM_TIMER
	if (substream->timer_running)
		snd_timer_interrupt(substream->timer, 1);
#endif
 _end:
	/* notify poll/fasync waiters even on the error path */
	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);

/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored on availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
			  snd_pcm_uframes_t *availp)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	wait_queue_t wait;
	int err = 0;
	snd_pcm_uframes_t avail = 0;
	long wait_time, tout;

	init_waitqueue_entry(&wait, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&runtime->tsleep, &wait);

	if (runtime->no_period_wakeup)
		wait_time = MAX_SCHEDULE_TIMEOUT;
	else {
		/* timeout: at least 10s, scaled up for very long periods */
		wait_time = 10;
		if (runtime->rate) {
			long t = runtime->period_size * 2 / runtime->rate;
			wait_time = max(t, wait_time);
		}
		wait_time = msecs_to_jiffies(wait_time * 1000);
	}

	for (;;) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}

		/*
		 * We need to check if space became available already
		 * (and thus the wakeup happened already) first to close
		 * the race of space already having become available.
		 * This check must happen after been added to the waitqueue
		 * and having current state be INTERRUPTIBLE.
		 */
		if (is_playback)
			avail = snd_pcm_playback_avail(runtime);
		else
			avail = snd_pcm_capture_avail(runtime);
		if (avail >= runtime->twake)
			break;
		/* drop the stream lock while sleeping */
		snd_pcm_stream_unlock_irq(substream);

		tout = schedule_timeout(wait_time);

		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _endloop;
		case SNDRV_PCM_STATE_DRAINING:
			if (is_playback)
				err = -EPIPE;
			else
				avail = 0; /* indicate draining */
			goto _endloop;
		case SNDRV_PCM_STATE_OPEN:
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_DISCONNECTED:
			err = -EBADFD;
			goto _endloop;
		case SNDRV_PCM_STATE_PAUSED:
			continue;	/* keep waiting while paused */
		}
		if (!tout) {
			pcm_dbg(substream->pcm,
				"%s write error (DMA or IRQ trouble?)\n",
				is_playback ? "playback" : "capture");
			err = -EIO;
			break;
		}
	}
 _endloop:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&runtime->tsleep, &wait);
	*availp = avail;
	return err;
}

/* Copy interleaved user data into the DMA buffer (or via the copy op). */
static int snd_pcm_lib_write_transfer(struct snd_pcm_substream *substream,
				      unsigned int hwoff,
				      unsigned long data, unsigned int off,
				      snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
	if (substream->ops->copy) {
		if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
			return err;
	} else {
		char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
		if (copy_from_user(hwbuf, buf, frames_to_bytes(runtime, frames)))
			return -EFAULT;
	}
	return 0;
}

/* Transfer callback type shared by the write1/read1 copy loops. */
typedef int (*transfer_f)(struct snd_pcm_substream *substream, unsigned int hwoff,
			  unsigned long data, unsigned int off,
			  snd_pcm_uframes_t size);

/*
 * Common playback copy loop: waits for space, copies via @transfer,
 * advances appl_ptr and auto-starts the stream at the start threshold.
 */
static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
					    unsigned long data,
					    snd_pcm_uframes_t size,
					    int nonblock,
					    transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	int err = 0;

	if (size == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		break;
	case SNDRV_PCM_STATE_XRUN:
		err = -EPIPE;
		goto _end_unlock;
	case SNDRV_PCM_STATE_SUSPENDED:
		err = -ESTRPIPE;
		goto _end_unlock;
	default:
		err = -EBADFD;
		goto _end_unlock;
	}

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);
	avail = snd_pcm_playback_avail(runtime);
	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
		}
		frames = size > avail ? avail : size;
		/* do not wrap past the end of the ring buffer in one chunk */
		cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			runtime->twake = 0;
			snd_pcm_stream_unlock_irq(substream);
			return -EINVAL;
		}
		appl_ptr = runtime->control->appl_ptr;
		appl_ofs = appl_ptr % runtime->buffer_size;
		/* copy happens unlocked; state is re-checked afterwards */
		snd_pcm_stream_unlock_irq(substream);
		err = transfer(substream, appl_ofs, data, offset, frames);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _end_unlock;
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _end_unlock;
		default:
			break;
		}
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		runtime->control->appl_ptr = appl_ptr;
		if (substream->ops->ack)
			substream->ops->ack(substream);

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
		/* auto-start once enough data has been queued */
		if (runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	/* partial transfers report the frame count, not the error */
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}

/* sanity-check for read/write methods */
static int pcm_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	/* either a copy op or a DMA area must exist for the default path */
	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
		return -EINVAL;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	return 0;
}

/* Write interleaved frames from user space to a playback substream. */
snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream,
				    const void __user *buf,
				    snd_pcm_uframes_t size)
{
	struct snd_pcm_runtime *runtime;
	int nonblock;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;
	runtime = substream->runtime;
	nonblock = !!(substream->f_flags & O_NONBLOCK);

	if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
	    runtime->channels > 1)
		return -EINVAL;
	return snd_pcm_lib_write1(substream, (unsigned long)buf, size, nonblock,
				  snd_pcm_lib_write_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_write);

/* Copy per-channel (non-interleaved) user buffers into the DMA area. */
static int snd_pcm_lib_writev_transfer(struct snd_pcm_substream *substream,
				       unsigned int hwoff,
				       unsigned long data, unsigned int off,
				       snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	void __user **bufs = (void __user **)data;
	int channels = runtime->channels;
	int c;
	if (substream->ops->copy) {
		if (snd_BUG_ON(!substream->ops->silence))
			return -EINVAL;
		for (c = 0; c < channels; ++c, ++bufs) {
			if (*bufs == NULL) {
				/* NULL buffer means silence for this channel */
				if ((err = substream->ops->silence(substream, c, hwoff, frames)) < 0)
					return err;
			} else {
				char __user *buf = *bufs + samples_to_bytes(runtime, off);
				if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
					return err;
			}
		}
	} else {
		/* default transfer behaviour */
		size_t dma_csize = runtime->dma_bytes / channels;
		for (c = 0; c < channels; ++c, ++bufs) {
			char *hwbuf =
				runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
			if (*bufs == NULL) {
				/* NULL buffer means silence for this channel */
				snd_pcm_format_set_silence(runtime->format, hwbuf, frames);
			} else {
				char __user *buf = *bufs + samples_to_bytes(runtime, off);
				if (copy_from_user(hwbuf, buf, samples_to_bytes(runtime, frames)))
					return -EFAULT;
			}
		}
	}
	return 0;
}

/* Write non-interleaved frames (one user buffer per channel) to playback. */
snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream,
				     void __user **bufs,
				     snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime;
	int nonblock;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;
	runtime = substream->runtime;
	nonblock = !!(substream->f_flags & O_NONBLOCK);

	if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	return snd_pcm_lib_write1(substream, (unsigned long)bufs, frames,
				  nonblock, snd_pcm_lib_writev_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_writev);

/* Copy interleaved captured data from the DMA buffer to user space. */
static int snd_pcm_lib_read_transfer(struct snd_pcm_substream *substream,
				     unsigned int hwoff,
				     unsigned long data, unsigned int off,
				     snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
	if (substream->ops->copy) {
		if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
			return err;
	} else {
		char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
		if (copy_to_user(buf, hwbuf, frames_to_bytes(runtime, frames)))
			return -EFAULT;
	}
	return 0;
}

/*
 * Common capture copy loop: mirror of snd_pcm_lib_write1 for the
 * capture direction, including auto-start and draining handling.
 */
static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
					   unsigned long data,
					   snd_pcm_uframes_t size,
					   int nonblock,
					   transfer_f transfer)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t xfer = 0;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_uframes_t avail;
	int err = 0;

	if (size == 0)
		return 0;

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_PREPARED:
		/* auto-start capture when enough frames were requested */
		if (size >= runtime->start_threshold) {
			err = snd_pcm_start(substream);
			if (err < 0)
				goto _end_unlock;
		}
		break;
	case
	     SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PAUSED:
		break;
	case SNDRV_PCM_STATE_XRUN:
		err = -EPIPE;
		goto _end_unlock;
	case SNDRV_PCM_STATE_SUSPENDED:
		err = -ESTRPIPE;
		goto _end_unlock;
	default:
		err = -EBADFD;
		goto _end_unlock;
	}

	runtime->twake = runtime->control->avail_min ? : 1;
	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
		snd_pcm_update_hw_ptr(substream);
	avail = snd_pcm_capture_avail(runtime);
	while (size > 0) {
		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
		snd_pcm_uframes_t cont;
		if (!avail) {
			if (runtime->status->state ==
			    SNDRV_PCM_STATE_DRAINING) {
				/* draining finished: stop and return */
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				goto _end_unlock;
			}
			if (nonblock) {
				err = -EAGAIN;
				goto _end_unlock;
			}
			runtime->twake = min_t(snd_pcm_uframes_t, size,
					runtime->control->avail_min ? : 1);
			err = wait_for_avail(substream, &avail);
			if (err < 0)
				goto _end_unlock;
			if (!avail)
				continue; /* draining */
		}
		frames = size > avail ? avail : size;
		/* do not wrap past the end of the ring buffer in one chunk */
		cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
		if (frames > cont)
			frames = cont;
		if (snd_BUG_ON(!frames)) {
			runtime->twake = 0;
			snd_pcm_stream_unlock_irq(substream);
			return -EINVAL;
		}
		appl_ptr = runtime->control->appl_ptr;
		appl_ofs = appl_ptr % runtime->buffer_size;
		/* copy happens unlocked; state is re-checked afterwards */
		snd_pcm_stream_unlock_irq(substream);
		err = transfer(substream, appl_ofs, data, offset, frames);
		snd_pcm_stream_lock_irq(substream);
		if (err < 0)
			goto _end_unlock;
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_XRUN:
			err = -EPIPE;
			goto _end_unlock;
		case SNDRV_PCM_STATE_SUSPENDED:
			err = -ESTRPIPE;
			goto _end_unlock;
		default:
			break;
		}
		appl_ptr += frames;
		if (appl_ptr >= runtime->boundary)
			appl_ptr -= runtime->boundary;
		runtime->control->appl_ptr = appl_ptr;
		if (substream->ops->ack)
			substream->ops->ack(substream);

		offset += frames;
		size -= frames;
		xfer += frames;
		avail -= frames;
	}
 _end_unlock:
	runtime->twake = 0;
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	/* partial transfers report the frame count, not the error */
	return
		xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}

/* Read interleaved frames from a capture substream into user space. */
snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream,
				   void __user *buf, snd_pcm_uframes_t size)
{
	struct snd_pcm_runtime *runtime;
	int nonblock;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;
	runtime = substream->runtime;
	nonblock = !!(substream->f_flags & O_NONBLOCK);
	if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED)
		return -EINVAL;
	return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock,
				 snd_pcm_lib_read_transfer);
}
EXPORT_SYMBOL(snd_pcm_lib_read);

/* Copy captured data into per-channel (non-interleaved) user buffers. */
static int snd_pcm_lib_readv_transfer(struct snd_pcm_substream *substream,
				      unsigned int hwoff,
				      unsigned long data, unsigned int off,
				      snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	void __user **bufs = (void __user **)data;
	int channels = runtime->channels;
	int c;
	if (substream->ops->copy) {
		for (c = 0; c < channels; ++c, ++bufs) {
			char __user *buf;
			if (*bufs == NULL)
				continue;	/* channel not requested */
			buf = *bufs + samples_to_bytes(runtime, off);
			if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
				return err;
		}
	} else {
		snd_pcm_uframes_t dma_csize = runtime->dma_bytes / channels;
		for (c = 0; c < channels; ++c, ++bufs) {
			char *hwbuf;
			char __user *buf;
			if (*bufs == NULL)
				continue;	/* channel not requested */

			hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
			buf = *bufs + samples_to_bytes(runtime, off);
			if (copy_to_user(buf, hwbuf, samples_to_bytes(runtime, frames)))
				return -EFAULT;
		}
	}
	return 0;
}

/* Read non-interleaved frames (one user buffer per channel) from capture. */
snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream,
				    void __user **bufs,
				    snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime;
	int nonblock;
	int err;

	err = pcm_sanity_check(substream);
	if (err < 0)
		return err;
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	nonblock = !!(substream->f_flags & O_NONBLOCK);
	if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	return
snd_pcm_lib_read1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer); } EXPORT_SYMBOL(snd_pcm_lib_readv); /* * standard channel mapping helpers */ /* default channel maps for multi-channel playbacks, up to 8 channels */ const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps); /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */ const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps); static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch) { if (ch > info->max_channels) return false; return !info->channel_mask || (info->channel_mask & (1U << ch)); } static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 0; uinfo->count = info->max_channels; uinfo->value.integer.min = 0; 
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

/* get callback for channel map ctl element
 * stores the channel position firstly matching with the current channels
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	struct snd_pcm_substream *substream;
	const struct snd_pcm_chmap_elem *map;

	if (snd_BUG_ON(!info->chmap))
		return -EINVAL;
	substream = snd_pcm_chmap_substream(info, idx);
	if (!substream)
		return -ENODEV;
	memset(ucontrol->value.integer.value, 0,
	       sizeof(ucontrol->value.integer.value));
	if (!substream->runtime)
		return 0; /* no channels set */
	for (map = info->chmap; map->channels; map++) {
		int i;
		if (map->channels == substream->runtime->channels &&
		    valid_chmap_channels(info, map->channels)) {
			for (i = 0; i < map->channels; i++)
				ucontrol->value.integer.value[i] = map->map[i];
			return 0;
		}
	}
	return -EINVAL;	/* no map matches the current channel count */
}

/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in a form of TLV
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			     unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	const struct snd_pcm_chmap_elem *map;
	unsigned int __user *dst;
	int c, count = 0;

	if (snd_BUG_ON(!info->chmap))
		return -EINVAL;
	if (size < 8)
		return -ENOMEM;	/* not enough room for the container header */
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;
	for (map = info->chmap; map->channels; map++) {
		int chs_bytes = map->channels * 4;
		if (!valid_chmap_channels(info, map->channels))
			continue;
		if (size < 8)
			return -ENOMEM;
		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
		    put_user(chs_bytes, dst + 1))
			return -EFAULT;
		dst += 2;
		size -= 8;
		count += 8;
		if (size < chs_bytes)
			return -ENOMEM;
		size -= chs_bytes;
		count += chs_bytes;
		for (c = 0; c < map->channels; c++) {
			if (put_user(map->map[c], dst))
				return -EFAULT;
dst++; } } if (put_user(count, tlv + 1)) return -EFAULT; return 0; } static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); info->pcm->streams[info->stream].chmap_kctl = NULL; kfree(info); } /** * snd_pcm_add_chmap_ctls - create channel-mapping control elements * @pcm: the assigned PCM instance * @stream: stream direction * @chmap: channel map elements (for query) * @max_channels: the max number of channels for the stream * @private_value: the value passed to each kcontrol's private_value field * @info_ret: store struct snd_pcm_chmap instance if non-NULL * * Create channel-mapping control elements assigned to the given PCM stream(s). * Return: Zero if successful, or a negative error value. */ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, const struct snd_pcm_chmap_elem *chmap, int max_channels, unsigned long private_value, struct snd_pcm_chmap **info_ret) { struct snd_pcm_chmap *info; struct snd_kcontrol_new knew = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, .info = pcm_chmap_ctl_info, .get = pcm_chmap_ctl_get, .tlv.c = pcm_chmap_ctl_tlv, }; int err; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->pcm = pcm; info->stream = stream; info->chmap = chmap; info->max_channels = max_channels; if (stream == SNDRV_PCM_STREAM_PLAYBACK) knew.name = "Playback Channel Map"; else knew.name = "Capture Channel Map"; knew.device = pcm->device; knew.count = pcm->streams[stream].substream_count; knew.private_value = private_value; info->kctl = snd_ctl_new1(&knew, info); if (!info->kctl) { kfree(info); return -ENOMEM; } info->kctl->private_free = pcm_chmap_ctl_private_free; err = snd_ctl_add(pcm->card, info->kctl); if (err < 0) return err; pcm->streams[stream].chmap_kctl = info->kctl; if (info_ret) *info_ret = info; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
./CrossVul/dataset_final_sorted/CWE-416/c/good_5503_0
crossvul-cpp_data_good_2566_0
/* * POSIX message queues filesystem for Linux. * * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) * Michal Wronski (michal.wronski@gmail.com) * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * * This file is released under the GPL. */ #include <linux/capability.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> #include <linux/ipc_namespace.h> #include <linux/user_namespace.h> #include <linux/slab.h> #include <linux/sched/wake_q.h> #include <linux/sched/signal.h> #include <linux/sched/user.h> #include <net/sock.h> #include "util.h" #define MQUEUE_MAGIC 0x19800202 #define DIRENT_SIZE 20 #define FILENT_SIZE 80 #define SEND 0 #define RECV 1 #define STATE_NONE 0 #define STATE_READY 1 struct posix_msg_tree_node { struct rb_node rb_node; struct list_head msg_list; int priority; }; struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; struct list_head list; struct msg_msg *msg; /* ptr of loaded message */ int state; /* one of STATE_* values */ }; struct mqueue_inode_info { spinlock_t lock; struct inode vfs_inode; wait_queue_head_t wait_q; struct rb_root msg_tree; struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; struct pid *notify_owner; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; /* for tasks waiting for free space and messages, 
respectively */ struct ext_wait_queue e_wait_q[2]; unsigned long qsize; /* size of queue in memory (sum of all msgs) */ }; static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; static const struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); static struct kmem_cache *mqueue_inode_cachep; static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } /* * This routine should be called with the mq_lock held. */ static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) { return get_ipc_ns(inode->i_sb->s_fs_info); } static struct ipc_namespace *get_ns_from_inode(struct inode *inode) { struct ipc_namespace *ns; spin_lock(&mq_lock); ns = __get_ns_from_inode(inode); spin_unlock(&mq_lock); return ns; } /* Auxiliary functions to manipulate messages' list */ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct posix_msg_tree_node *leaf; p = &info->msg_tree.rb_node; while (*p) { parent = *p; leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (likely(leaf->priority == msg->m_type)) goto insert_msg; else if (msg->m_type < leaf->priority) p = &(*p)->rb_left; else p = &(*p)->rb_right; } if (info->node_cache) { leaf = info->node_cache; info->node_cache = NULL; } else { leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); rb_insert_color(&leaf->rb_node, &info->msg_tree); insert_msg: info->attr.mq_curmsgs++; info->qsize += msg->m_ts; list_add_tail(&msg->m_list, &leaf->msg_list); return 0; } static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) { struct rb_node **p, *parent = NULL; struct 
posix_msg_tree_node *leaf; struct msg_msg *msg; try_again: p = &info->msg_tree.rb_node; while (*p) { parent = *p; /* * During insert, low priorities go to the left and high to the * right. On receive, we want the highest priorities first, so * walk all the way to the right. */ p = &(*p)->rb_right; } if (!parent) { if (info->attr.mq_curmsgs) { pr_warn_once("Inconsistency in POSIX message queue, " "no tree element, but supposedly messages " "should exist!\n"); info->attr.mq_curmsgs = 0; } return NULL; } leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); if (unlikely(list_empty(&leaf->msg_list))) { pr_warn_once("Inconsistency in POSIX message queue, " "empty leaf node but we haven't implemented " "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } goto try_again; } else { msg = list_first_entry(&leaf->msg_list, struct msg_msg, m_list); list_del(&msg->m_list); if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { kfree(leaf); } else { info->node_cache = leaf; } } } info->attr.mq_curmsgs--; info->qsize -= msg->m_ts; return msg; } static struct inode *mqueue_get_inode(struct super_block *sb, struct ipc_namespace *ipc_ns, umode_t mode, struct mq_attr *attr) { struct user_struct *u = current_user(); struct inode *inode; int ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode); if (S_ISREG(mode)) { struct mqueue_inode_info *info; unsigned long mq_bytes, mq_treesize; inode->i_fop = &mqueue_file_operations; inode->i_size = FILENT_SIZE; /* mqueue specific info */ info = MQUEUE_I(inode); spin_lock_init(&info->lock); init_waitqueue_head(&info->wait_q); INIT_LIST_HEAD(&info->e_wait_q[0].list); INIT_LIST_HEAD(&info->e_wait_q[1].list); 
info->notify_owner = NULL; info->notify_user_ns = NULL; info->qsize = 0; info->user = NULL; /* set when all is ok */ info->msg_tree = RB_ROOT; info->node_cache = NULL; memset(&info->attr, 0, sizeof(info->attr)); info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); if (attr) { info->attr.mq_maxmsg = attr->mq_maxmsg; info->attr.mq_msgsize = attr->mq_msgsize; } /* * We used to allocate a static array of pointers and account * the size of that array as well as one msg_msg struct per * possible message into the queue size. That's no longer * accurate as the queue is now an rbtree and will grow and * shrink depending on usage patterns. We can, however, still * account one msg_msg struct per message, but the nodes are * allocated depending on priority usage, and most programs * only use one, or a handful, of priorities. However, since * this is pinned memory, we need to assume worst case, so * that means the min(mq_maxmsg, max_priorities) * struct * posix_msg_tree_node. 
*/ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); spin_lock(&mq_lock); if (u->mq_bytes + mq_bytes < u->mq_bytes || u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); /* mqueue_evict_inode() releases info->messages */ ret = -EMFILE; goto out_inode; } u->mq_bytes += mq_bytes; spin_unlock(&mq_lock); /* all is ok */ info->user = get_uid(u); } else if (S_ISDIR(mode)) { inc_nlink(inode); /* Some things misbehave if size == 0 on a directory */ inode->i_size = 2 * DIRENT_SIZE; inode->i_op = &mqueue_dir_inode_operations; inode->i_fop = &simple_dir_operations; } return inode; out_inode: iput(inode); err: return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct ipc_namespace *ns = sb->s_fs_info; sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); if (IS_ERR(inode)) return PTR_ERR(inode); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct ipc_namespace *ns; if (flags & MS_KERNMOUNT) { ns = data; data = NULL; } else { ns = current->nsproxy->ipc_ns; } return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super); } static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; inode_init_once(&p->vfs_inode); } static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void 
mqueue_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } static void mqueue_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, mqueue_i_callback); } static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; unsigned long mq_bytes, mq_treesize; struct ipc_namespace *ipc_ns; struct msg_msg *msg; clear_inode(inode); if (S_ISDIR(inode->i_mode)) return; ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); while ((msg = msg_get(info)) != NULL) free_msg(msg); kfree(info->node_cache); spin_unlock(&info->lock); /* Total amount of bytes accounted for the mqueue */ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); mq_bytes = mq_treesize + (info->attr.mq_maxmsg * info->attr.mq_msgsize); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; /* * get_ns_from_inode() ensures that the * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns * to which we now hold a reference, or it is NULL. * We can't put it here under mq_lock, though. 
*/ if (ipc_ns) ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } if (ipc_ns) put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); ipc_ns = __get_ns_from_inode(dir); if (!ipc_ns) { error = -EACCES; goto out_unlock; } if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; goto out_unlock; } ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); if (IS_ERR(inode)) { error = PTR_ERR(inode); spin_lock(&mq_lock); ipc_ns->mq_queues_count--; goto out_unlock; } put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); d_instantiate(dentry, inode); dget(dentry); return 0; out_unlock: spin_unlock(&mq_lock); if (ipc_ns) put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); dir->i_size -= DIRENT_SIZE; drop_nlink(inode); dput(dentry); return 0; } /* * This is routine for system read from queue file. * To avoid mess with doing here some sort of mq_receive we allow * to read only queue size & notification info (the only values * that are interesting from user point of view and aren't accessible * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, size_t count, loff_t *off) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", info->qsize, info->notify_owner ? info->notify.sigev_notify : 0, (info->notify_owner && info->notify.sigev_notify == SIGEV_SIGNAL) ? 
info->notify.sigev_signo : 0, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; ret = simple_read_from_buffer(u_data, count, off, buffer, strlen(buffer)); if (ret <= 0) return ret; file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp)); return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) remove_notification(info); spin_unlock(&info->lock); return 0; } static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); spin_lock(&info->lock); if (info->attr.mq_curmsgs) retval = POLLIN | POLLRDNORM; if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) retval |= POLLOUT | POLLWRNORM; spin_unlock(&info->lock); return retval; } /* Adds current to info->e_wait_q[sr] before element with smaller prio */ static void wq_add(struct mqueue_inode_info *info, int sr, struct ext_wait_queue *ewp) { struct ext_wait_queue *walk; ewp->task = current; list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { if (walk->task->static_prio <= current->static_prio) { list_add_tail(&ewp->list, &walk->list); return; } } list_add_tail(&ewp->list, &info->e_wait_q[sr].list); } /* * Puts current task to sleep. Caller must hold queue lock. After return * lock isn't held. 
* sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, ktime_t *timeout, struct ext_wait_queue *ewp) __releases(&info->lock) { int retval; signed long time; wq_add(info, sr, ewp); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); time = schedule_hrtimeout_range_clock(timeout, 0, HRTIMER_MODE_ABS, CLOCK_REALTIME); if (ewp->state == STATE_READY) { retval = 0; goto out; } spin_lock(&info->lock); if (ewp->state == STATE_READY) { retval = 0; goto out_unlock; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (time == 0) { retval = -ETIMEDOUT; break; } } list_del(&ewp->list); out_unlock: spin_unlock(&info->lock); out: return retval; } /* * Returns waiting task that should be serviced first or NULL if none exists */ static struct ext_wait_queue *wq_get_first_waiter( struct mqueue_inode_info *info, int sr) { struct list_head *ptr; ptr = info->e_wait_q[sr].list.prev; if (ptr == &info->e_wait_q[sr].list) return NULL; return list_entry(ptr, struct ext_wait_queue, list); } static inline void set_cookie(struct sk_buff *skb, char code) { ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* * The next function is only to split too long sys_mq_timedsend */ static void __do_notify(struct mqueue_inode_info *info) { /* notification * invoked when there is registered process and there isn't process * waiting synchronously for message AND state of queue changed from * empty to not empty. Here we are sure that no one is waiting * synchronously. 
*/ if (info->notify_owner && info->attr.mq_curmsgs == 1) { struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: /* sends signal */ sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); break; case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } wake_up(&info->wait_q); } static int prepare_timeout(const struct timespec __user *u_abs_timeout, struct timespec *ts) { if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } static void remove_notification(struct mqueue_inode_info *info) { if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); put_user_ns(info->notify_user_ns); info->notify_owner = NULL; info->notify_user_ns = NULL; } static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { int mq_treesize; unsigned long total_size; if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { if (attr->mq_maxmsg > HARD_MSGMAX || attr->mq_msgsize > HARD_MSGSIZEMAX) return -EINVAL; } else { if (attr->mq_maxmsg > ipc_ns->mq_msg_max || attr->mq_msgsize > ipc_ns->mq_msgsize_max) return -EINVAL; } /* check for overflow */ 
if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) return -EOVERFLOW; mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node); total_size = attr->mq_maxmsg * attr->mq_msgsize; if (total_size + mq_treesize < total_size) return -EOVERFLOW; return 0; } /* * Invoked when creating a new queue via sys_mq_open */ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, struct path *path, int oflag, umode_t mode, struct mq_attr *attr) { const struct cred *cred = current_cred(); int ret; if (attr) { ret = mq_attr_ok(ipc_ns, attr); if (ret) return ERR_PTR(ret); /* store for use during create */ path->dentry->d_fsdata = attr; } else { struct mq_attr def_attr; def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, ipc_ns->mq_msg_default); def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, ipc_ns->mq_msgsize_default); ret = mq_attr_ok(ipc_ns, &def_attr); if (ret) return ERR_PTR(ret); } mode &= ~current_umask(); ret = vfs_create(dir, path->dentry, mode, true); path->dentry->d_fsdata = NULL; if (ret) return ERR_PTR(ret); return dentry_open(path, oflag, cred); } /* Opens existing queue */ static struct file *do_open(struct path *path, int oflag) { static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE }; int acc; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); acc = oflag2acc[oflag & O_ACCMODE]; if (inode_permission(d_inode(path->dentry), acc)) return ERR_PTR(-EACCES); return dentry_open(path, oflag, current_cred()); } static int do_mq_open(const char __user *u_name, int oflag, umode_t mode, struct mq_attr *attr) { struct path path; struct file *filp; struct filename *name; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; struct dentry *root = mnt->mnt_root; int ro; audit_mq_open(oflag, mode, attr); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); fd = 
get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; ro = mnt_want_write(mnt); /* we'll drop it in any case */ error = 0; inode_lock(d_inode(root)); path.dentry = lookup_one_len(name->name, root, strlen(name->name)); if (IS_ERR(path.dentry)) { error = PTR_ERR(path.dentry); goto out_putfd; } path.mnt = mntget(mnt); if (oflag & O_CREAT) { if (d_really_is_positive(path.dentry)) { /* entry already exists */ audit_inode(name, path.dentry, 0); if (oflag & O_EXCL) { error = -EEXIST; goto out; } filp = do_open(&path, oflag); } else { if (ro) { error = ro; goto out; } audit_inode_parent_hidden(name, root); filp = do_create(ipc_ns, d_inode(root), &path, oflag, mode, attr); } } else { if (d_really_is_negative(path.dentry)) { error = -ENOENT; goto out; } audit_inode(name, path.dentry, 0); filp = do_open(&path, oflag); } if (!IS_ERR(filp)) fd_install(fd, filp); else error = PTR_ERR(filp); out: path_put(&path); out_putfd: if (error) { put_unused_fd(fd); fd = error; } inode_unlock(d_inode(root)); if (!ro) mnt_drop_write(mnt); out_putname: putname(name); return fd; } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { struct mq_attr attr; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; return do_mq_open(u_name, oflag, mode, u_attr ? 
&attr : NULL); } SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; struct filename *name; struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); audit_inode_parent_hidden(name, mnt->mnt_root); err = mnt_want_write(mnt); if (err) goto out_name; inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT); dentry = lookup_one_len(name->name, mnt->mnt_root, strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } inode = d_inode(dentry); if (!inode) { err = -ENOENT; } else { ihold(inode); err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL); } dput(dentry); out_unlock: inode_unlock(d_inode(mnt->mnt_root)); if (inode) iput(inode); mnt_drop_write(mnt); out_name: putname(name); return err; } /* Pipelined send and receive functions. * * If a receiver finds no waiting message, then it registers itself in the * list of waiting receivers. A sender checks that list before adding the new * message into the message array. If there is a waiting receiver, then it * bypasses the message array and directly hands the message over to the * receiver. The receiver accepts the message and returns without grabbing the * queue spinlock: * * - Set pointer to message. * - Queue the receiver task for later wakeup (without the info->lock). * - Update its state to STATE_READY. Now the receiver can continue. * - Wake up the process after the lock is dropped. Should the process wake up * before this wakeup (due to a timeout or a signal) it will either see * STATE_READY and continue or acquire the lock to check the state again. * * The same algorithm is used for senders. */ /* pipelined_send() - send a message directly to the task waiting in * sys_mq_timedreceive() (without inserting message into a queue). 
*/ static inline void pipelined_send(struct wake_q_head *wake_q, struct mqueue_inode_info *info, struct msg_msg *message, struct ext_wait_queue *receiver) { receiver->msg = message; list_del(&receiver->list); wake_q_add(wake_q, receiver->task); /* * Rely on the implicit cmpxchg barrier from wake_q_add such * that we can ensure that updating receiver->state is the last * write operation: As once set, the receiver can continue, * and if we don't have the reference count from the wake_q, * yet, at that point we can later have a use-after-free * condition and bogus wakeup. */ receiver->state = STATE_READY; } /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() * gets its message and put to the queue (we have one free place for sure). */ static inline void pipelined_receive(struct wake_q_head *wake_q, struct mqueue_inode_info *info) { struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); if (!sender) { /* for poll */ wake_up_interruptible(&info->wait_q); return; } if (msg_insert(sender->msg, info)) return; list_del(&sender->list); wake_q_add(wake_q, sender->task); sender->state = STATE_READY; } static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, size_t msg_len, unsigned int msg_prio, struct timespec *ts) { struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; int ret = 0; DEFINE_WAKE_Q(wake_q); if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; if (ts) { expires = timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { 
ret = -EBADF; goto out_fput; } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* First try to allocate memory, before doing anything with * existing queues. */ msg_ptr = load_msg(u_msg_ptr, msg_len); if (IS_ERR(msg_ptr)) { ret = PTR_ERR(msg_ptr); goto out_fput; } msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. */ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; new_leaf = NULL; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); /* * wq_sleep must be called with info->lock held, and * returns with the lock released */ goto out_free; } } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(&wake_q, info, msg_ptr, receiver); } else { /* adds message to the queue */ ret = msg_insert(msg_ptr, info); if (ret) goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); } out_unlock: spin_unlock(&info->lock); wake_up_q(&wake_q); out_free: if (ret) free_msg(msg_ptr); out_fput: fdput(f); out: return ret; } static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, size_t msg_len, unsigned int __user *u_msg_prio, struct timespec *ts) { ssize_t ret; struct msg_msg *msg_ptr; struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; ktime_t expires, *timeout = NULL; struct posix_msg_tree_node *new_leaf = NULL; if (ts) { expires = 
timespec_to_ktime(*ts); timeout = &expires; } audit_mq_sendrecv(mqdes, msg_len, 0, ts); f = fdget(mqdes); if (unlikely(!f.file)) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); audit_file(f.file); if (unlikely(!(f.file->f_mode & FMODE_READ))) { ret = -EBADF; goto out_fput; } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { ret = -EMSGSIZE; goto out_fput; } /* * msg_insert really wants us to have a valid, spare node struct so * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will * fall back to that if necessary. */ if (!info->node_cache) new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); spin_lock(&info->lock); if (!info->node_cache && new_leaf) { /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; } else { kfree(new_leaf); } if (info->attr.mq_curmsgs == 0) { if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; } else { wait.task = current; wait.state = STATE_NONE; ret = wq_sleep(info, RECV, timeout, &wait); msg_ptr = wait.msg; } } else { DEFINE_WAKE_Q(wake_q); msg_ptr = msg_get(info); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); /* There is now free space in queue. 
*/ pipelined_receive(&wake_q, info); spin_unlock(&info->lock); wake_up_q(&wake_q); ret = 0; } if (ret == 0) { ret = msg_ptr->m_ts; if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { ret = -EFAULT; } free_msg(msg_ptr); } out_fput: fdput(f); out: return ret; } SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, size_t, msg_len, unsigned int, msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, size_t, msg_len, unsigned int __user *, u_msg_prio, const struct timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } /* * Notes: the case when user wants us to deregister (with NULL as pointer) * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. 
 */
/*
 * Register (notification != NULL) or unregister (NULL) asynchronous
 * notification on a queue.  SIGEV_THREAD delivery is implemented via a
 * netlink socket whose fd is passed in sigev_signo and a cookie skb built
 * from sigev_value.sival_ptr.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				/* netlink_attachskb() slept and dropped our
				 * socket reference; clear the stale pointer
				 * before retrying so the out: path can never
				 * touch it again (the CVE-2017-11176
				 * use-after-free fix) */
				sock = NULL;
				goto retry;
			}
			if (ret) {
				/* on error netlink_attachskb() consumed both
				 * the socket ref and the skb; clear both so
				 * the out: cleanup below stays a no-op */
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		/* deregistration is only honoured for the current owner;
		 * otherwise it is silently ignored (see comment above) */
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			/* ownership of sock and nc transfers to info; NULL
			 * them so the out: path does not release them */
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	/* release whatever we still own; both are NULL on the paths where
	 * ownership moved elsewhere */
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification,
				   sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

/*
 * Read (old) and/or update (new) the queue attributes; only the O_NONBLOCK
 * flag is settable, everything else in mq_flags is rejected.
 */
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		/* f_flags is protected by f_lock, not info->lock */
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;
	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;
	if
(copy_to_user(u_omqstat, old, sizeof(struct mq_attr))) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT struct compat_mq_attr { compat_long_t mq_flags; /* message queue flags */ compat_long_t mq_maxmsg; /* maximum number of messages */ compat_long_t mq_msgsize; /* maximum message size */ compat_long_t mq_curmsgs; /* number of messages currently queued */ compat_long_t __reserved[4]; /* ignored for input, zeroed for output */ }; static inline int get_compat_mq_attr(struct mq_attr *attr, const struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; if (copy_from_user(&v, uattr, sizeof(*uattr))) return -EFAULT; memset(attr, 0, sizeof(*attr)); attr->mq_flags = v.mq_flags; attr->mq_maxmsg = v.mq_maxmsg; attr->mq_msgsize = v.mq_msgsize; attr->mq_curmsgs = v.mq_curmsgs; return 0; } static inline int put_compat_mq_attr(const struct mq_attr *attr, struct compat_mq_attr __user *uattr) { struct compat_mq_attr v; memset(&v, 0, sizeof(v)); v.mq_flags = attr->mq_flags; v.mq_maxmsg = attr->mq_maxmsg; v.mq_msgsize = attr->mq_msgsize; v.mq_curmsgs = attr->mq_curmsgs; if (copy_to_user(uattr, &v, sizeof(*uattr))) return -EFAULT; return 0; } COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, compat_mode_t, mode, struct compat_mq_attr __user *, u_attr) { struct mq_attr attr, *p = NULL; if (u_attr && oflag & O_CREAT) { p = &attr; if (get_compat_mq_attr(&attr, u_attr)) return -EFAULT; } return do_mq_open(u_name, oflag, mode, p); } static int compat_prepare_timeout(const struct compat_timespec __user *p, struct timespec *ts) { if (compat_get_timespec(ts, p)) return -EFAULT; if (!timespec_valid(ts)) return -EINVAL; return 0; } COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int, msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return 
do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p); } COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int __user *, u_msg_prio, const struct compat_timespec __user *, u_abs_timeout) { struct timespec ts, *p = NULL; if (u_abs_timeout) { int res = compat_prepare_timeout(u_abs_timeout, &ts); if (res) return res; p = &ts; } return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p); } COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, const struct compat_sigevent __user *, u_notification) { struct sigevent n, *p = NULL; if (u_notification) { if (get_compat_sigevent(&n, u_notification)) return -EFAULT; if (n.sigev_notify == SIGEV_THREAD) n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); p = &n; } return do_mq_notify(mqdes, p); } COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, const struct compat_mq_attr __user *, u_mqstat, struct compat_mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; struct mq_attr *new = NULL, *old = NULL; if (u_mqstat) { new = &mqstat; if (get_compat_mq_attr(new, u_mqstat)) return -EFAULT; } if (u_omqstat) old = &omqstat; ret = do_mq_getsetattr(mqdes, new, old); if (ret || !old) return ret; if (put_compat_mq_attr(old, u_omqstat)) return -EFAULT; return 0; } #endif static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, .unlink = mqueue_unlink, }; static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, .llseek = default_llseek, }; static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", .mount = mqueue_mount, .kill_sb = kill_litter_super, .fs_flags = FS_USERNS_MOUNT, }; int 
mq_init_ns(struct ipc_namespace *ns) { ns->mq_queues_count = 0; ns->mq_queues_max = DFLT_QUEUESMAX; ns->mq_msg_max = DFLT_MSGMAX; ns->mq_msgsize_max = DFLT_MSGSIZEMAX; ns->mq_msg_default = DFLT_MSG; ns->mq_msgsize_default = DFLT_MSGSIZE; ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); if (IS_ERR(ns->mq_mnt)) { int err = PTR_ERR(ns->mq_mnt); ns->mq_mnt = NULL; return err; } return 0; } void mq_clear_sbinfo(struct ipc_namespace *ns) { ns->mq_mnt->mnt_sb->s_fs_info = NULL; } void mq_put_mnt(struct ipc_namespace *ns) { kern_unmount(ns->mq_mnt); } static int __init init_mqueue_fs(void) { int error; mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; /* ignore failures - they are not fatal */ mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; spin_lock_init(&mq_lock); error = mq_init_ns(&init_ipc_ns); if (error) goto out_filesystem; return 0; out_filesystem: unregister_filesystem(&mqueue_fs_type); out_sysctl: if (mq_sysctl_table) unregister_sysctl_table(mq_sysctl_table); kmem_cache_destroy(mqueue_inode_cachep); return error; } device_initcall(init_mqueue_fs);
./CrossVul/dataset_final_sorted/CWE-416/c/good_2566_0
crossvul-cpp_data_good_822_1
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

/*
 * Drop spool->lock and, if this was the last reference and no pages are
 * in use, return the subpool's minimum-size reservation and free it.
 * Must be called with spool->lock held; the lock is always released.
 */
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

/*
 * Allocate a new subpool.  max_hpages/min_hpages of -1 mean "no limit" /
 * "no minimum"; a non-negative minimum is reserved up front against the
 * global pool and the whole allocation fails if that reservation fails.
 */
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

/* Drop one reference; frees the subpool when it becomes unused. */
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * the request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().  Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.
This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			/* drop the lock to allocate, then re-scan */
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to   = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				/* drop the lock to allocate, then re-scan */
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	/* free the unused, pre-allocated split descriptor, if any */
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * cases this will be same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
* * With the exception of reset_vma_resv_huge_pages() which is called at fork(), * the reserve counters are updated with the hugetlb_lock held. It is safe * to reset the VMA at fork() time as it is not in use yet and there is no * chance of the global counters getting corrupted as a result of the values. * * The private mapping reservation is represented in a subtly different * manner to a shared mapping. A shared mapping has a region map associated * with the underlying file, this region map represents the backing file * pages which have ever had a reservation assigned which this persists even * after the page is instantiated. A private mapping has a region map * associated with the original mmap which is attached to all VMAs which * reference it, this region map represents those offsets which have consumed * reservation ie. where pages have been instantiated. */ static unsigned long get_vma_private_data(struct vm_area_struct *vma) { return (unsigned long)vma->vm_private_data; } static void set_vma_private_data(struct vm_area_struct *vma, unsigned long value) { vma->vm_private_data = (void *)value; } struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); if (!resv_map || !rg) { kfree(resv_map); kfree(rg); return NULL; } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); resv_map->adds_in_progress = 0; INIT_LIST_HEAD(&resv_map->region_cache); list_add(&rg->link, &resv_map->region_cache); resv_map->region_cache_count = 1; return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); struct list_head *head = &resv_map->region_cache; struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ region_del(resv_map, 0, LONG_MAX); /* ... 
and any entries left in the cache */ list_for_each_entry_safe(rg, trg, head, link) { list_del(&rg->link); kfree(rg); } VM_BUG_ON(resv_map->adds_in_progress); kfree(resv_map); } static inline struct resv_map *inode_resv_map(struct inode *inode) { return inode->i_mapping->private_data; } static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; return inode_resv_map(inode); } else { return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); } } static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); } static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } /* Returns true if the VMA has associated reserve pages */ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { /* * This address is already reserved by other process(chg == 0), * so, we should decrement reserved count. 
Without decrementing, * reserve count remains after releasing inode, because this * allocated page will go into page cache and is regarded as * coming from reserved pool in releasing step. Currently, we * don't have any other solution to deal with this situation * properly, so add work-around here. */ if (vma->vm_flags & VM_MAYSHARE && chg == 0) return true; else return false; } /* Shared mappings always use reserves */ if (vma->vm_flags & VM_MAYSHARE) { /* * We know VM_NORESERVE is not set. Therefore, there SHOULD * be a region map for all pages. The only situation where * there is no region map is if a hole was punched via * fallocate. In this case, there really are no reverves to * use. This situation is indicated if chg != 0. */ if (chg) return false; else return true; } /* * Only the process that called mmap() has reserves for * private mappings. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { /* * Like the shared case above, a hole punch or truncate * could have been performed on the private mapping. * Examine the value of chg to determine if reserves * actually exist or were previously consumed. * Very Subtle - The value of chg comes from a previous * call to vma_needs_reserves(). The reserve map for * private mappings has different (opposite) semantics * than that of shared mappings. vma_needs_reserves() * has already taken this difference in semantics into * account. Therefore, the meaning of chg is the same * as in the shared case above. Code could easily be * combined, but keeping it separate draws attention to * subtle differences. 
*/ if (chg) return false; else return true; } return false; } static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); list_move(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; } static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; list_for_each_entry(page, &h->hugepage_freelists[nid], lru) if (!PageHWPoison(page)) break; /* * if 'non-isolated free hugepage' not found on the list, * the allocation fails. */ if (&h->hugepage_freelists[nid] == &page->lru) return NULL; list_move(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return page; } static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { unsigned int cpuset_mems_cookie; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; int node = -1; zonelist = node_zonelist(nid, gfp_mask); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { struct page *page; if (!cpuset_zone_allowed(zone, gfp_mask)) continue; /* * no need to ask again on the same node. Pool is node rather than * zone aware */ if (zone_to_nid(zone) == node) continue; node = zone_to_nid(zone); page = dequeue_huge_page_node_exact(h, node); if (page) return page; } if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return NULL; } /* Movability of hugepages depends on migration support. 
*/
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

/*
 * Dequeue a free huge page for a fault at @address in @vma, honouring
 * the VMA's mempolicy and its reservation state.  @chg is the value a
 * prior vma_needs_reservation() call returned for this address.
 */
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		/* PagePrivate marks that this page consumed a reservation */
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
*/
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/* Visit each node in *mask once, round-robin from the hstate's cursor. */
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
/* Tear down compound-page metadata of a gigantic page before freeing it. */
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

/* Try to claim one physically contiguous pfn run for a gigantic page. */
static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

/* Cheap pre-check: does [start_pfn, start_pfn+nr_pages) look allocatable? */
static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

/* Does the pfn range end within @zone's spanned pages? */
static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

/*
 * Scan the zonelist for an aligned pfn range that looks free and try to
 * allocate it as one contiguous gigantic page via alloc_contig_range().
 */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Return a huge page to the page allocator (contiguous-range allocator
 * for gigantic pages) after clearing hugetlb state and flag bits, and
 * update the hstate page counts.
 */
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

/* Map a huge page size in bytes to its hstate, or NULL if none matches. */
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
*/
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	/* the "active" flag is kept in page[1]'s private bit */
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
 * code.  "Temporary" pages (e.g. migration targets) are tagged by storing
 * a sentinel in page[2].mapping.
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}

/*
 * Compound-page destructor for hugetlb pages: return the page to the
 * free pool, the surplus accounting or the page allocator as
 * appropriate, restoring any reservation the page consumed.
 */
void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	set_page_private(page, 0);
	page->mapping = NULL;
	/* PagePrivate set means this page consumed a reservation */
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after page is free.
	 * Therefore, force restore_reserve operation.
*/
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		/* temporary pages are freed outright, never pooled */
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/* Register a freshly allocated page as a huge page with its hstate. */
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

/* Build compound-page metadata across all tail pages of a gigantic page. */
static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
*/
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

/*
 * Return the base-page index of @page within its mapping, accounting
 * for huge pages that span multiple base-page cache indices.
 */
pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		/* mem_map may be discontiguous; use pfn arithmetic */
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

/* Allocate a huge page straight from the buddy allocator; count the event. */
static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);
	struct page *page;

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page.
All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages.
This function does
 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
 * dissolution fails because a give page is not a free hugepage, or because
 * free hugepages are fully reserved.
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		/* don't dissolve pages that back outstanding reservations */
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	/* step by the smallest registered huge page order */
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
*/
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommiting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_page
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		put_page(page);
		page = NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Allocate a huge page for migration without touching the surplus
 * accounting; the page is marked temporary so the free path releases
 * it outright on the final reference drop.
 */
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
*/
static struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	/* prefer an unreserved free pool page, fall back to a fresh one */
	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
*/
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	/* allocate outside the lock; hugetlb_lock is retaken below */
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_page always is greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}

/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
*/
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE)
			ret = region_add(resv, idx, idx + 1);
		else {
			region_abort(resv, idx, idx + 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
		/*
		 * In most cases, reserves always exist for private mappings.
		 * However, a file associated with mapping could have been
		 * hole punched or truncated after reserves were consumed.
		 * As subsequent fault on such a range will not use reserves.
		 * Subtle - The reserve map for private mappings has the
		 * opposite meaning than that of shared mappings.  If NO
		 * entry is in the reserve map, it means a reservation exists.
		 * If an entry exists in the reserve map, it means the
		 * reservation has already been consumed.  As a result, the
		 * return value of this routine is the opposite of the
		 * value returned from reserve map manipulation routines above.
		 */
		if (ret)
			return 0;
		else
			return 1;
	}
	else
		return ret < 0 ? ret : 0;
}

static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page.  When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map.  Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */
static void restore_reserve_on_error(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address,
			struct page *page)
{
	if (unlikely(PagePrivate(page))) {
		long rc = vma_needs_reservation(h, vma, address);

		if (unlikely(rc < 0)) {
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear PagePrivate so that
			 * global reserve count will not be incremented
			 * by free_huge_page.  This will make it appear
			 * as though the reservation for this page was
			 * consumed.  This may prevent the task from
			 * faulting in the page at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
*/
			ClearPagePrivate(page);
		} else if (rc) {
			rc = vma_add_reservation(h, vma, address);
			if (unlikely(rc < 0))
				/*
				 * See above comment about rare out of
				 * memory condition.
				 */
				ClearPagePrivate(page);
		} else
			vma_end_reservation(h, vma, address);
	}
}

/*
 * Allocate a huge page for @vma at @addr, charging the subpool, the
 * reservation maps and the hugetlb cgroup as required.  Returns the
 * page or an ERR_PTR (-ENOMEM/-ENOSPC) on failure.
 */
struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_subpool_put;

	spin_lock(&hugetlb_lock);
	/*
	 * glb_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetPagePrivate(page);
			h->resv_huge_pages--;
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}

/* Weak alias so architectures can override the bootmem allocation path. */
int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
*/
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

/* Build compound metadata, taking the gigantic path for order >= MAX_ORDER. */
static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;

		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and another
		 * side-effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, 1 << h->order);
		cond_resched();
	}
}

/* Pre-allocate max_huge_pages pages for @h at boot, warning on shortfall. */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_pool_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* track the smallest registered order for pfn stepping */
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

/* Log each registered huge page size with its pre-allocated pool count. */
static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
/* When shrinking the pool, free lowmem huge pages first on HIGHMEM configs. */
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (hstate_is_gigantic(h))
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
*/ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, int delta) { int nr_nodes, node; VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) { for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node]) goto found; } } else { for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node] < h->nr_huge_pages_node[node]) goto found; } } return 0; found: h->surplus_huge_pages += delta; h->surplus_huge_pages_node[node] += delta; return 1; } #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { unsigned long min_count, ret; if (hstate_is_gigantic(h) && !gigantic_page_supported()) return h->max_huge_pages; /* * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * * We might race with alloc_surplus_huge_page() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ spin_lock(&hugetlb_lock); while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_page will handle it by freeing the page * and reducing the surplus. */ spin_unlock(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); ret = alloc_pool_huge_page(h, nodes_allowed); spin_lock(&hugetlb_lock); if (!ret) goto out; /* Bail for signals. 
Probably ctrl-c from user */ if (signal_pending(current)) goto out; } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * alloc_surplus_huge_page() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. */ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); while (min_count < persistent_huge_pages(h)) { if (!free_pool_huge_page(h, nodes_allowed, 0)) break; cond_resched_lock(&hugetlb_lock); } while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: ret = persistent_huge_pages(h); spin_unlock(&hugetlb_lock); return ret; } #define HSTATE_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define HSTATE_ATTR(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) static struct kobject *hugepages_kobj; static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) { int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (hstate_kobjs[i] == kobj) { if (nidp) *nidp = NUMA_NO_NODE; return &hstates[i]; } return kobj_to_node_hstate(kobj, nidp); } static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long 
nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

/*
 * Common worker for the nr_hugepages store paths (sysfs and sysctl).
 * Resolves the allowed node mask (mempolicy, explicit node, or all memory
 * nodes) and resizes the pool via set_max_huge_pages().
 */
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

/* Parse the user's count and hand off to __nr_hugepages_store_common(). */
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

/* Overcommit limit is not supported for gigantic pages. */
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return
sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

/*
 * Create the per-hstate kobject under @parent and attach the given
 * attribute group.  On failure the kobject reference is dropped.
 */
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static const struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;

		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
static void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);

		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];

		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif

/*
 * Subsystem initialization: validate/establish the default huge page size,
 * perform boot-time pool preallocation, and register the sysfs/cgroup
 * interfaces plus the per-fault mutex table.
 */
static int __init hugetlb_init(void)
{
	int i;

	if (!hugepages_supported())
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		if (default_hstate_size != 0) {
			pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
			       default_hstate_size, HPAGE_SIZE);
		}

		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
	if (default_hstate_max_huge_pages) {
		if (!default_hstate.max_huge_pages)
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;
	}

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
			      GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}

/* Register a new hstate of the given order (from hugepagesz= parsing). */
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warn("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

/* Parse a "hugepages=" boot parameter for the most recent hugepagesz=. */
static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	if (!parsed_valid_hugepagesz) {
		pr_warn("hugepages = %s preceded by "
			"an unsupported hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz =
true; return 1; } /* * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet, * so this hugepages= parameter goes to the "default hstate". */ else if (!hugetlb_max_hstate) mhp = &default_hstate_max_huge_pages; else mhp = &parsed_hstate->max_huge_pages; if (mhp == last_mhp) { pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n"); return 1; } if (sscanf(s, "%lu", mhp) <= 0) *mhp = 0; /* * Global state is always initialized later in hugetlb_init. * But we need to allocate >= MAX_ORDER hstates here early to still * use the bootmem allocator. */ if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER) hugetlb_hstate_alloc_pages(parsed_hstate); last_mhp = mhp; return 1; } __setup("hugepages=", hugetlb_nrpages_setup); static int __init hugetlb_default_setup(char *s) { default_hstate_size = memparse(s, &s); return 1; } __setup("default_hugepagesz=", hugetlb_default_setup); static unsigned int cpuset_mems_nr(unsigned int *array) { int node; unsigned int nr = 0; for_each_node_mask(node, cpuset_current_mems_allowed) nr += array[node]; return nr; } #ifdef CONFIG_SYSCTL static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp = h->max_huge_pages; int ret; if (!hugepages_supported()) return -EOPNOTSUPP; table->data = &tmp; table->maxlen = sizeof(unsigned long); ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); if (ret) goto out; if (write) ret = __nr_hugepages_store_common(obey_mempolicy, h, NUMA_NO_NODE, tmp, *length); out: return ret; } int hugetlb_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { return hugetlb_sysctl_handler_common(false, table, write, buffer, length, ppos); } #ifdef CONFIG_NUMA int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t 
*ppos) { return hugetlb_sysctl_handler_common(true, table, write, buffer, length, ppos); } #endif /* CONFIG_NUMA */ int hugetlb_overcommit_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; int ret; if (!hugepages_supported()) return -EOPNOTSUPP; tmp = h->nr_overcommit_huge_pages; if (write && hstate_is_gigantic(h)) return -EINVAL; table->data = &tmp; table->maxlen = sizeof(unsigned long); ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); if (ret) goto out; if (write) { spin_lock(&hugetlb_lock); h->nr_overcommit_huge_pages = tmp; spin_unlock(&hugetlb_lock); } out: return ret; } #endif /* CONFIG_SYSCTL */ void hugetlb_report_meminfo(struct seq_file *m) { struct hstate *h; unsigned long total = 0; if (!hugepages_supported()) return; for_each_hstate(h) { unsigned long count = h->nr_huge_pages; total += (PAGE_SIZE << huge_page_order(h)) * count; if (h == &default_hstate) seq_printf(m, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" "HugePages_Surp: %5lu\n" "Hugepagesize: %8lu kB\n", count, h->free_huge_pages, h->resv_huge_pages, h->surplus_huge_pages, (PAGE_SIZE << huge_page_order(h)) / 1024); } seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024); } int hugetlb_report_node_meminfo(int nid, char *buf) { struct hstate *h = &default_hstate; if (!hugepages_supported()) return 0; return sprintf(buf, "Node %d HugePages_Total: %5u\n" "Node %d HugePages_Free: %5u\n" "Node %d HugePages_Surp: %5u\n", nid, h->nr_huge_pages_node[nid], nid, h->free_huge_pages_node[nid], nid, h->surplus_huge_pages_node[nid]); } void hugetlb_show_meminfo(void) { struct hstate *h; int nid; if (!hugepages_supported()) return; for_each_node_state(nid, N_MEMORY) for_each_hstate(h) pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", nid, h->nr_huge_pages_node[nid], h->free_huge_pages_node[nid], 
				h->surplus_huge_pages_node[nid],
				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
}

/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

/*
 * Charge (@delta > 0) or uncharge (@delta < 0) huge page reservations
 * against the global pool, growing it with surplus pages if needed.
 * Returns 0 on success or -ENOMEM.
 */
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_get(&resv->refs);
}

/* Drop the owner's reservation map and release any unconsumed reserves. */
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);

	kref_put(&resv->refs, resv_map_release);

	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}
}

/* Only allow splits on huge-page-aligned addresses. */
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;
	return 0;
}

static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
 * this far.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.split = hugetlb_vm_op_split,
	.pagesize = hugetlb_vm_op_pagesize,
};

/* Build a huge PTE for @page with protections derived from the VMA. */
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}

bool is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_migration_entry(swp))
		return true;
	else
		return false;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	else
		return 0;
}

/* Duplicate the source mm's hugetlb page tables into dst (fork path). */
int copy_hugetlb_page_range(struct
mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry, dst_entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mmu_notifier_range range;
	int ret = 0;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	if (cow) {
		mmu_notifier_range_init(&range, src, vma->vm_start,
					vma->vm_end);
		mmu_notifier_invalidate_range_start(&range);
	}

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;

		src_pte = huge_pte_offset(src, addr, sz);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * If the pagetables are shared don't copy or take references.
		 * dst_pte == src_pte is the common case of src/dest sharing.
		 *
		 * However, src could have 'unshared' and dst shares with
		 * another vma.  If dst_pte !none, this implies sharing.
		 * Check here before taking page table lock, and once again
		 * after taking the lock below.
		 */
		dst_entry = huge_ptep_get(dst_pte);
		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
			continue;

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
		dst_entry = huge_ptep_get(dst_pte);
		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
			/*
			 * Skip if src entry none.  Also, skip in the
			 * unlikely case dst entry !none as this implies
			 * sharing with another vma.
			 */
			;
		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
				    is_hugetlb_entry_hwpoisoned(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);

			if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
				entry = swp_entry_to_pte(swp_entry);
				set_huge_swap_pte_at(src, addr, src_pte,
						     entry, sz);
			}
			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
		} else {
			if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
				 * See Documentation/vm/mmu_notifier.rst
				 */
				huge_ptep_set_wrprotect(src, addr, src_pte);
			}
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			page_dup_rmap(ptepage, true);
			set_huge_pte_at(dst, addr, dst_pte, entry);
			hugetlb_count_add(pages_per_huge_page(h), dst);
		}
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow)
		mmu_notifier_invalidate_range_end(&range);

	return ret;
}

/*
 * Unmap huge pages in [start, end) of @vma.  If @ref_page is non-NULL,
 * only that specific page is unmapped.  Caller holds the mmu_gather and
 * (per the final/_final variants) the i_mmap lock where required.
 */
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mmu_notifier_range range;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	/*
	 * This is a hugetlb vma, all the pte entries should point
	 * to huge page.
	 */
	tlb_remove_check_page_size_change(tlb, sz);
	tlb_start_vma(tlb, vma);

	/*
	 * If sharing possible, alert mmu notifiers of worst case.
	 */
	mmu_notifier_range_init(&range, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
	address = start;
	for (; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			spin_unlock(ptl);
			/*
			 * We just unmapped a page of PMDs by clearing a PUD.
			 * The caller's TLB flush range should cover this area.
			 */
			continue;
		}

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page) {
				spin_unlock(ptl);
				continue;
			}
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);

		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, true);

		spin_unlock(ptl);
		tlb_remove_page_size(tlb, page, huge_page_size(h));
		/*
		 * Bail out after unmapping reference page if supplied
		 */
		if (ref_page)
			break;
	}
	mmu_notifier_invalidate_range_end(&range);
	tlb_end_vma(tlb, vma);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here. We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem. This works
	 * because in the context this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm;
	struct mmu_gather tlb;
	unsigned long tlb_start = start;
	unsigned long tlb_end = end;

	/*
	 * If shared PMDs were possibly used within this vma range, adjust
	 * start/end for worst case tlb flushing.
	 * Note that we can not be sure if PMDs are shared until we try to
	 * unmap pages.  However, we want to make sure TLB flushing covers
	 * the largest possible range.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mappping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = vma->vm_file->f_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
	}
	i_mmap_unlock_write(mapping);
}

/*
 * Hugetlb_cow() should be called with page lock of the original hugepage held.
 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
		       unsigned long address, pte_t *ptep,
		       struct page *pagecache_page, spinlock_t *ptl)
{
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int outside_reserve = 0;
	vm_fault_t ret = 0;
	unsigned long haddr = address & huge_page_mask(h);
	struct mmu_notifier_range range;

	pte = huge_ptep_get(ptep);
	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
		page_move_anon_rmap(old_page, vma);
		set_huge_ptep_writable(vma, haddr, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	get_page(old_page);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_page = alloc_huge_page(vma, haddr, outside_reserve);

	if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
*/ if (outside_reserve) { put_page(old_page); BUG_ON(huge_pte_none(pte)); unmap_ref_private(mm, vma, old_page, haddr); BUG_ON(huge_pte_none(pte)); spin_lock(ptl); ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) goto retry_avoidcopy; /* * race occurs while re-acquiring page table * lock, and our job is done. */ return 0; } ret = vmf_error(PTR_ERR(new_page)); goto out_release_old; } /* * When the original hugepage is shared one, it does not have * anon_vma prepared. */ if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto out_release_all; } copy_user_huge_page(new_page, old_page, address, vma, pages_per_huge_page(h)); __SetPageUptodate(new_page); mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h)); mmu_notifier_invalidate_range_start(&range); /* * Retake the page table lock to check for racing updates * before the page tables are altered */ spin_lock(ptl); ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { ClearPagePrivate(new_page); /* Break COW */ huge_ptep_clear_flush(vma, haddr, ptep); mmu_notifier_invalidate_range(mm, range.start, range.end); set_huge_pte_at(mm, haddr, ptep, make_huge_pte(vma, new_page, 1)); page_remove_rmap(old_page, true); hugepage_add_new_anon_rmap(new_page, vma, haddr); set_page_huge_active(new_page); /* Make the old page be freed below */ new_page = old_page; } spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: restore_reserve_on_error(h, vma, haddr, new_page); put_page(new_page); out_release_old: put_page(old_page); spin_lock(ptl); /* Caller expects lock to be held */ return ret; } /* Return the pagecache page at a given address within a VMA */ static struct page *hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping; pgoff_t idx; mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, 
							vma, address);
	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/* Only an existence check: drop the reference immediately. */
	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

/*
 * Insert a huge page into the file's page cache at index @idx and
 * charge the inode's block count.  Returns 0 on success or the
 * add_to_page_cache() error (e.g. -EEXIST on a racing insertion).
 */
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	/* GFP_KERNEL here is for radix-tree node allocation, not the page. */
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
	ClearPagePrivate(page);

	/*
	 * set page dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	set_page_dirty(page);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}

/*
 * Handle a hugetlb fault on a not-yet-instantiated pte.  Called from
 * hugetlb_fault() with the hugetlb_fault_mutex for this (mapping, idx)
 * held, which serializes against concurrent instantiation of the same
 * page.  Returns a VM_FAULT_* code.
 */
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
*/ retry: page = find_lock_page(mapping, idx); if (!page) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto out; /* * Check for page in userfault range */ if (userfaultfd_missing(vma)) { u32 hash; struct vm_fault vmf = { .vma = vma, .address = haddr, .flags = flags, /* * Hard to debug if it ends up being * used by a callee that assumes * something about the other * uninitialized fields... same as in * memory.c */ }; /* * hugetlb_fault_mutex must be dropped before * handling userfault. Reacquire after handling * fault to make calling code simpler. */ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); mutex_unlock(&hugetlb_fault_mutex_table[hash]); ret = handle_userfault(&vmf, VM_UFFD_MISSING); mutex_lock(&hugetlb_fault_mutex_table[hash]); goto out; } page = alloc_huge_page(vma, haddr, 0); if (IS_ERR(page)) { ret = vmf_error(PTR_ERR(page)); goto out; } clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); new_page = true; if (vma->vm_flags & VM_MAYSHARE) { int err = huge_add_to_page_cache(page, mapping, idx); if (err) { put_page(page); if (err == -EEXIST) goto retry; goto out; } } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto backout_unlocked; } anon_rmap = 1; } } else { /* * If memory error occurs between mmap() and fault, some process * don't have hwpoisoned swap entry for errored virtual address. * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(PageHWPoison(page))) { ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } } /* * If we are going to COW a private mapping later, we examine the * pending reservations for this page now. This will ensure that * any allocations necessary to record that reservation occur outside * the spinlock. 
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	/* Recheck i_size under the page table lock to catch truncation races. */
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	/* Someone else instantiated the pte while we allocated: back out. */
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap) {
		ClearPagePrivate(page);
		hugepage_add_new_anon_rmap(page, vma, haddr);
	} else
		page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
	}

	spin_unlock(ptl);

	/*
	 * Only make newly allocated pages active.  Existing pages found
	 * in the pagecache could be !page_huge_active() if they have been
	 * isolated for migration.
	 */
	if (new_page)
		set_page_huge_active(page);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	restore_reserve_on_error(h, vma, haddr, page);
	put_page(page);
	goto out;
}

#ifdef CONFIG_SMP
/*
 * Hash a fault to one of num_fault_mutexes hugetlb fault mutexes.
 * Shared mappings key on (mapping, idx) so all mms faulting the same
 * file page serialize; private mappings key on (mm, huge-page index).
 */
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			    struct vm_area_struct *vma,
			    struct address_space *mapping,
			    pgoff_t idx, unsigned long address)
{
	unsigned long key[2];
	u32 hash;

	if (vma->vm_flags & VM_SHARED) {
		key[0] = (unsigned long) mapping;
		key[1] = idx;
	} else {
		key[0] = (unsigned long) mm;
		key[1] = address >> huge_page_shift(h);
	}

	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);

	/* num_fault_mutexes is a power of two, so mask selects a slot. */
	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
*/ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) { return 0; } #endif vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pte_t *ptep, entry; spinlock_t *ptl; vm_fault_t ret; u32 hash; pgoff_t idx; struct page *page = NULL; struct page *pagecache_page = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; int need_wait_lock = 0; unsigned long haddr = address & huge_page_mask(h); ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (ptep) { entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { migration_entry_wait_huge(vma, mm, ptep); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); } else { ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); if (!ptep) return VM_FAULT_OOM; } mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, haddr); /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); if (huge_pte_none(entry)) { ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); goto out_mutex; } ret = 0; /* * entry could be a migration/hwpoison entry at this point, so this * check prevents the kernel from going below assuming that we have * a active hugepage in pagecache. This goto expects the 2nd page fault, * and is_hugetlb_entry_(migration|hwpoisoned) check will properly * handle it. */ if (!pte_present(entry)) goto out_mutex; /* * If we are going to COW the mapping later, we examine the pending * reservations for this page now. 
This will ensure that any * allocations necessary to record that reservation occur outside the * spinlock. For private mappings, we also lookup the pagecache * page now as it is used to determine if a reservation has been * consumed. */ if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { if (vma_needs_reservation(h, vma, haddr) < 0) { ret = VM_FAULT_OOM; goto out_mutex; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, haddr); if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr); } ptl = huge_pte_lock(h, mm, ptep); /* Check for a racing update before calling hugetlb_cow */ if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) goto out_ptl; /* * hugetlb_cow() requires page locks of pte_page(entry) and * pagecache_page, so here we need take the former one * when page != pagecache_page or !pagecache_page. */ page = pte_page(entry); if (page != pagecache_page) if (!trylock_page(page)) { need_wait_lock = 1; goto out_ptl; } get_page(page); if (flags & FAULT_FLAG_WRITE) { if (!huge_pte_write(entry)) { ret = hugetlb_cow(mm, vma, address, ptep, pagecache_page, ptl); goto out_put_page; } entry = huge_pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, haddr, ptep); out_put_page: if (page != pagecache_page) unlock_page(page); put_page(page); out_ptl: spin_unlock(ptl); if (pagecache_page) { unlock_page(pagecache_page); put_page(pagecache_page); } out_mutex: mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and * the page is not used after unlocked before returning from the current * page fault. So we are safe from accessing freed page, even if we wait * here without taking refcount. 
*/ if (need_wait_lock) wait_on_page_locked(page); return ret; } /* * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with * modifications for huge pages. */ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep) { struct address_space *mapping; pgoff_t idx; unsigned long size; int vm_shared = dst_vma->vm_flags & VM_SHARED; struct hstate *h = hstate_vma(dst_vma); pte_t _dst_pte; spinlock_t *ptl; int ret; struct page *page; if (!*pagep) { ret = -ENOMEM; page = alloc_huge_page(dst_vma, dst_addr, 0); if (IS_ERR(page)) goto out; ret = copy_huge_page_from_user(page, (const void __user *) src_addr, pages_per_huge_page(h), false); /* fallback to copy_from_user outside mmap_sem */ if (unlikely(ret)) { ret = -ENOENT; *pagep = page; /* don't free the page */ goto out; } } else { page = *pagep; *pagep = NULL; } /* * The memory barrier inside __SetPageUptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. */ __SetPageUptodate(page); mapping = dst_vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, dst_vma, dst_addr); /* * If shared, add to page cache */ if (vm_shared) { size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) goto out_release_nounlock; /* * Serialization between remove_inode_hugepages() and * huge_add_to_page_cache() below happens through the * hugetlb_fault_mutex_table that here must be hold by * the caller. */ ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; } ptl = huge_pte_lockptr(h, dst_mm, dst_pte); spin_lock(ptl); /* * Recheck the i_size after holding PT lock to make sure not * to leave any page mapped (as page_mapped()) beyond the end * of the i_size (remove_inode_hugepages() is strict about * enforcing that). 
If we bail out here, we'll also leave a * page in the radix tree in the vm_shared case beyond the end * of the i_size, but remove_inode_hugepages() will take care * of it as soon as we drop the hugetlb_fault_mutex_table. */ size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) goto out_release_unlock; ret = -EEXIST; if (!huge_pte_none(huge_ptep_get(dst_pte))) goto out_release_unlock; if (vm_shared) { page_dup_rmap(page, true); } else { ClearPagePrivate(page); hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); } _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); if (dst_vma->vm_flags & VM_WRITE) _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, dst_vma->vm_flags & VM_WRITE); hugetlb_count_add(pages_per_huge_page(h), dst_mm); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); set_page_huge_active(page); if (vm_shared) unlock_page(page); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); if (vm_shared) unlock_page(page); out_release_nounlock: put_page(page); goto out; } long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags, int *nonblocking) { unsigned long pfn_offset; unsigned long vaddr = *position; unsigned long remainder = *nr_pages; struct hstate *h = hstate_vma(vma); int err = -EFAULT; while (vaddr < vma->vm_end && remainder) { pte_t *pte; spinlock_t *ptl = NULL; int absent; struct page *page; /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (fatal_signal_pending(current)) { remainder = 0; break; } /* * Some archs (sparc64, sh*) have multiple pte_ts to * each hugepage. 
We have to make sure we get the * first, for the page indexing below to work. * * Note that page table lock is not held when pte is null. */ pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), huge_page_size(h)); if (pte) ptl = huge_pte_lock(h, mm, pte); absent = !pte || huge_pte_none(huge_ptep_get(pte)); /* * When coredumping, it suits get_dump_page if we just return * an error where there's an empty slot with no huge pagecache * to back it. This way, we avoid allocating a hugepage, and * the sparse dumpfile avoids allocating disk blocks, but its * huge holes still show up with zeroes where they need to be. */ if (absent && (flags & FOLL_DUMP) && !hugetlbfs_pagecache_present(h, vma, vaddr)) { if (pte) spin_unlock(ptl); remainder = 0; break; } /* * We need call hugetlb_fault for both hugepages under migration * (in which case hugetlb_fault waits for the migration,) and * hwpoisoned hugepages (in which case we need to prevent the * caller from accessing to them.) In order to do this, we use * here is_swap_pte instead of is_hugetlb_entry_migration and * is_hugetlb_entry_hwpoisoned. This is because it simply covers * both cases, and because we can't follow correct pages * directly from any kind of swap entries. 
*/ if (absent || is_swap_pte(huge_ptep_get(pte)) || ((flags & FOLL_WRITE) && !huge_pte_write(huge_ptep_get(pte)))) { vm_fault_t ret; unsigned int fault_flags = 0; if (pte) spin_unlock(ptl); if (flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = hugetlb_fault(mm, vma, vaddr, fault_flags); if (ret & VM_FAULT_ERROR) { err = vm_fault_to_errno(ret, flags); remainder = 0; break; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; *nr_pages = 0; /* * VM_FAULT_RETRY must not return an * error, it will return zero * instead. * * No need to update "position" as the * caller will not check it after * *nr_pages is set to 0. */ return i; } continue; } pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; page = pte_page(huge_ptep_get(pte)); /* * Instead of doing 'try_get_page()' below in the same_page * loop, just check the count once here. */ if (unlikely(page_count(page) <= 0)) { if (pages) { spin_unlock(ptl); remainder = 0; err = -ENOMEM; break; } } same_page: if (pages) { pages[i] = mem_map_offset(page, pfn_offset); get_page(pages[i]); } if (vmas) vmas[i] = vma; vaddr += PAGE_SIZE; ++pfn_offset; --remainder; ++i; if (vaddr < vma->vm_end && remainder && pfn_offset < pages_per_huge_page(h)) { /* * We use pfn_offset to avoid touching the pageframes * of this compound page. */ goto same_page; } spin_unlock(ptl); } *nr_pages = remainder; /* * setting position is actually required only if remainder is * not zero but it's faster not to add a "if (remainder)" * branch. */ *position = vaddr; return i ? i : err; } #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE /* * ARCHes with special requirements for evicting HUGETLB backing TLB entries can * implement this. 
*/ #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #endif unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { struct mm_struct *mm = vma->vm_mm; unsigned long start = address; pte_t *ptep; pte_t pte; struct hstate *h = hstate_vma(vma); unsigned long pages = 0; bool shared_pmd = false; struct mmu_notifier_range range; /* * In the case of shared PMDs, the area to flush could be beyond * start/end. Set range.start/range.end to cover the maximum possible * range if PMD sharing is possible. */ mmu_notifier_range_init(&range, mm, start, end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); BUG_ON(address >= end); flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); i_mmap_lock_write(vma->vm_file->f_mapping); for (; address < end; address += huge_page_size(h)) { spinlock_t *ptl; ptep = huge_pte_offset(mm, address, huge_page_size(h)); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, &address, ptep)) { pages++; spin_unlock(ptl); shared_pmd = true; continue; } pte = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { spin_unlock(ptl); continue; } if (unlikely(is_hugetlb_entry_migration(pte))) { swp_entry_t entry = pte_to_swp_entry(pte); if (is_write_migration_entry(entry)) { pte_t newpte; make_migration_entry_read(&entry); newpte = swp_entry_to_pte(entry); set_huge_swap_pte_at(mm, address, ptep, newpte, huge_page_size(h)); pages++; } spin_unlock(ptl); continue; } if (!huge_pte_none(pte)) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(huge_pte_modify(pte, newprot)); pte = arch_make_huge_pte(pte, vma, NULL, 0); set_huge_pte_at(mm, address, ptep, pte); pages++; } spin_unlock(ptl); } /* * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare * may have cleared our pud entry and done put_page on the page table: * once we 
release i_mmap_rwsem, another task can do the final put_page * and that page table be reused and filled with junk. If we actually * did unshare a page of pmds, flush the range corresponding to the pud. */ if (shared_pmd) flush_hugetlb_tlb_range(vma, range.start, range.end); else flush_hugetlb_tlb_range(vma, start, end); /* * No need to call mmu_notifier_invalidate_range() we are downgrading * page table protection not changing it to point to a new page. * * See Documentation/vm/mmu_notifier.rst */ i_mmap_unlock_write(vma->vm_file->f_mapping); mmu_notifier_invalidate_range_end(&range); return pages << h->order; } int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) { long ret, chg; struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map; long gbl_reserve; /* This should never happen */ if (from > to) { VM_WARN(1, "%s called with a negative range\n", __func__); return -EINVAL; } /* * Only apply hugepage reservation if asked. At fault time, an * attempt will be made for VM_NORESERVE to allocate a page * without using reserves */ if (vm_flags & VM_NORESERVE) return 0; /* * Shared mappings base their reservation on the number of pages that * are already allocated on behalf of the file. Private mappings need * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !vma is a shm mapping */ if (!vma || vma->vm_flags & VM_MAYSHARE) { resv_map = inode_resv_map(inode); chg = region_chg(resv_map, from, to); } else { resv_map = resv_map_alloc(); if (!resv_map) return -ENOMEM; chg = to - from; set_vma_resv_map(vma, resv_map); set_vma_resv_flags(vma, HPAGE_RESV_OWNER); } if (chg < 0) { ret = chg; goto out_err; } /* * There must be enough pages in the subpool for the mapping. If * the subpool has a minimum size, there may be some global * reservations already in place (gbl_reserve). 
*/ gbl_reserve = hugepage_subpool_get_pages(spool, chg); if (gbl_reserve < 0) { ret = -ENOSPC; goto out_err; } /* * Check enough hugepages are available for the reservation. * Hand the pages back to the subpool if there are not */ ret = hugetlb_acct_memory(h, gbl_reserve); if (ret < 0) { /* put back original number of pages, chg */ (void)hugepage_subpool_put_pages(spool, chg); goto out_err; } /* * Account for the reservations made. Shared mappings record regions * that have reservations as they are shared by multiple VMAs. * When the last VMA disappears, the region map says how much * the reservation was and the page cache tells how much of * the reservation was consumed. Private mappings are per-VMA and * only the consumed reservations are tracked. When the VMA * disappears, the original reservation is the VMA size and the * consumed reservations are stored in the map. Hence, nothing * else has to be done for private mappings here */ if (!vma || vma->vm_flags & VM_MAYSHARE) { long add = region_add(resv_map, from, to); if (unlikely(chg > add)) { /* * pages in this range were added to the reserve * map between region_chg and region_add. This * indicates a race with alloc_huge_page. Adjust * the subpool and reserve counts modified above * based on the difference. 
*/ long rsv_adjust; rsv_adjust = hugepage_subpool_put_pages(spool, chg - add); hugetlb_acct_memory(h, -rsv_adjust); } } return 0; out_err: if (!vma || vma->vm_flags & VM_MAYSHARE) /* Don't call region_abort if region_chg failed */ if (chg >= 0) region_abort(resv_map, from, to); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return ret; } long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); long chg = 0; struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; if (resv_map) { chg = region_del(resv_map, start, end); /* * region_del() can fail in the rare case where a region * must be split and another region descriptor can not be * allocated. If end == LONG_MAX, it will not fail. */ if (chg < 0) return chg; } spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); /* * If the subpool has a minimum size, the number of global * reservations to be released may be adjusted. */ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); return 0; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE static unsigned long page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) { unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start; unsigned long sbase = saddr & PUD_MASK; unsigned long s_end = sbase + PUD_SIZE; /* Allow segments to share if only one is marked locked */ unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; /* * match the virtual addresses, permission and the alignment of the * page table page. 
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

/*
 * A VMA can share a PMD-level page table only if the whole PUD_SIZE
 * region containing @addr lies inside the VMA and it is VM_MAYSHARE.
 */
static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
		return true;
	return false;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long check_addr = *start;

	/* Sharing is only possible at all for VM_MAYSHARE mappings. */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Widen to PUD_SIZE boundaries wherever a full PUD region fits. */
	for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
		unsigned long a_start = check_addr & PUD_MASK;
		unsigned long a_end = a_start + PUD_SIZE;

		/*
		 * If sharing is possible, adjust start/end if necessary.
		 */
		if (range_in_vma(vma, a_start, a_end)) {
			if (a_start < *start)
				*start = a_start;
			if (a_end > *end)
				*end = a_end;
		}
	}
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
*/ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; pte_t *pte; spinlock_t *ptl; if (!vma_shareable(vma, addr)) return (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_lock_write(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; saddr = page_table_shareable(svma, vma, addr, idx); if (saddr) { spte = huge_pte_offset(svma->vm_mm, saddr, vma_mmu_pagesize(svma)); if (spte) { get_page(virt_to_page(spte)); break; } } } if (!spte) goto out; ptl = huge_pte_lock(hstate_vma(vma), mm, spte); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); mm_inc_nr_pmds(mm); } else { put_page(virt_to_page(spte)); } spin_unlock(ptl); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_write(mapping); return pte; } /* * unmap huge page backed by shared pte. * * Hugetlb pte page is ref counted at the time of mapping. If pte is shared * indicated by page_count > 1, unmap is achieved by clearing pud and * decrementing the ref count. If count == 1, the pte page is not shared. * * called with page table lock held. 
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	p4d_t *p4d = p4d_offset(pgd, *addr);
	pud_t *pud = pud_offset(p4d, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	/* count == 1: the pte page is not shared; nothing to unshare. */
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	/*
	 * Rewind *addr so the caller's loop increment lands at the start
	 * of the region the cleared pud covered.
	 */
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
#define want_pmd_share()	(1)

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

/* No PMD sharing: always fall through to plain pmd_alloc() in callers. */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}

/* No PMD sharing: nothing to unshare. */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

/* No PMD sharing: no range adjustment needed. */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
/*
 * Allocate (or find) the page table entry used to map a huge page of
 * size @sz at @addr.  For PUD_SIZE pages the pud itself is the entry;
 * for PMD_SIZE pages a pmd is allocated, possibly shared with other
 * mappings of the same file (see huge_pmd_share).  Returns NULL on
 * allocation failure.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table or swap entry (PUD or PMD) for
 * address @addr, or NULL if a p*d_none() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
*/ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (!p4d_present(*p4d)) return NULL; pud = pud_offset(p4d, addr); if (sz != PUD_SIZE && pud_none(*pud)) return NULL; /* hugepage or swap? */ if (pud_huge(*pud) || !pud_present(*pud)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); if (sz != PMD_SIZE && pmd_none(*pmd)) return NULL; /* hugepage or swap? */ if (pmd_huge(*pmd) || !pmd_present(*pmd)) return (pte_t *)pmd; return NULL; } #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /* * These functions are overwritable if your architecture needs its own * behavior. */ struct page * __weak follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { return ERR_PTR(-EINVAL); } struct page * __weak follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift) { WARN(1, "hugepd follow called with no support for hugepage directory format\n"); return NULL; } struct page * __weak follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int flags) { struct page *page = NULL; spinlock_t *ptl; pte_t pte; retry: ptl = pmd_lockptr(mm, pmd); spin_lock(ptl); /* * make sure that the address range covered by this pmd is not * unmapped from other threads. */ if (!pmd_huge(*pmd)) goto out; pte = huge_ptep_get((pte_t *)pmd); if (pte_present(pte)) { page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); if (flags & FOLL_GET) get_page(page); } else { if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); __migration_entry_wait(mm, (pte_t *)pmd, ptl); goto retry; } /* * hwpoisoned entry is treated as no_page_table in * follow_page_mask(). 
*/ } out: spin_unlock(ptl); return page; } struct page * __weak follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags) { if (flags & FOLL_GET) return NULL; return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); } struct page * __weak follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) { if (flags & FOLL_GET) return NULL; return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); } bool isolate_huge_page(struct page *page, struct list_head *list) { bool ret = true; VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); if (!page_huge_active(page) || !get_page_unless_zero(page)) { ret = false; goto unlock; } clear_page_huge_active(page); list_move_tail(&page->lru, list); unlock: spin_unlock(&hugetlb_lock); return ret; } void putback_active_hugepage(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); set_page_huge_active(page); list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); spin_unlock(&hugetlb_lock); put_page(page); } void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) { struct hstate *h = page_hstate(oldpage); hugetlb_cgroup_migrate(oldpage, newpage); set_page_owner_migrate_reason(newpage, reason); /* * transfer temporary state of the new huge page. This is * reverse to other transitions because the newpage is going to * be final while the old one will be freed so it takes over * the temporary status. * * Also note that we have to transfer the per-node surplus state * here as well otherwise the global surplus count will not match * the per-node's. 
*/ if (PageHugeTemporary(newpage)) { int old_nid = page_to_nid(oldpage); int new_nid = page_to_nid(newpage); SetPageHugeTemporary(oldpage); ClearPageHugeTemporary(newpage); spin_lock(&hugetlb_lock); if (h->surplus_huge_pages_node[old_nid]) { h->surplus_huge_pages_node[old_nid]--; h->surplus_huge_pages_node[new_nid]++; } spin_unlock(&hugetlb_lock); } }
./CrossVul/dataset_final_sorted/CWE-416/c/good_822_1
crossvul-cpp_data_good_5173_1
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt. | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Piere-Alain Joye <pierre@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/php_string.h" #include "ext/pcre/php_pcre.h" #include "ext/standard/php_filestat.h" #include "php_zip.h" #include "lib/zip.h" #include "lib/zipint.h" /* zip_open is a macro for renaming libzip zipopen, so we need to use PHP_NAMED_FUNCTION */ static PHP_NAMED_FUNCTION(zif_zip_open); static PHP_NAMED_FUNCTION(zif_zip_read); static PHP_NAMED_FUNCTION(zif_zip_close); static PHP_NAMED_FUNCTION(zif_zip_entry_read); static PHP_NAMED_FUNCTION(zif_zip_entry_filesize); static PHP_NAMED_FUNCTION(zif_zip_entry_name); static PHP_NAMED_FUNCTION(zif_zip_entry_compressedsize); static PHP_NAMED_FUNCTION(zif_zip_entry_compressionmethod); static PHP_NAMED_FUNCTION(zif_zip_entry_open); static PHP_NAMED_FUNCTION(zif_zip_entry_close); #ifdef HAVE_GLOB #ifndef PHP_WIN32 #include <glob.h> #else #include "win32/glob.h" #endif #endif /* {{{ Resource le */ static int le_zip_dir; #define le_zip_dir_name "Zip Directory" 
static int le_zip_entry;
#define le_zip_entry_name "Zip Entry"
/* }}} */

/* {{{ PHP_ZIP_STAT_INDEX(za, index, flags, sb) */
/* Stat an entry by index; expands RETURN_FALSE in the calling PHP_FUNCTION on failure. */
#define PHP_ZIP_STAT_INDEX(za, index, flags, sb) \
	if (zip_stat_index(za, index, flags, &sb) != 0) { \
		RETURN_FALSE; \
	}
/* }}} */

/* {{{  PHP_ZIP_STAT_PATH(za, path, path_len, flags, sb) */
/* Stat an entry by name; rejects empty names and returns FALSE on any failure. */
#define PHP_ZIP_STAT_PATH(za, path, path_len, flags, sb) \
	if (path_len < 1) { \
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as entry name"); \
		RETURN_FALSE; \
	} \
	if (zip_stat(za, path, flags, &sb) != 0) { \
		RETURN_FALSE; \
	}
/* }}} */

/* {{{ PHP_ZIP_SET_FILE_COMMENT(za, index, comment, comment_len) */
/* Set/clear an entry comment; note it uses the caller's `intern`, not the `za` argument. */
#define PHP_ZIP_SET_FILE_COMMENT(za, index, comment, comment_len) \
	if (comment_len == 0) { \
		/* Passing NULL remove the existing comment */ \
		if (zip_set_file_comment(intern, index, NULL, 0) < 0) { \
			RETURN_FALSE; \
		} \
	} else if (zip_set_file_comment(intern, index, comment, comment_len) < 0) { \
		RETURN_FALSE; \
	} \
	RETURN_TRUE;
/* }}} */

#if (PHP_MAJOR_VERSION < 6)
# define add_ascii_assoc_string add_assoc_string
# define add_ascii_assoc_long add_assoc_long
#endif

/* Flatten a path by making a relative path (to .)*/
/*
 * Returns a pointer into @path positioned after any leading slash or after the
 * last "../"- or drive-style ("X:") prefix, so absolute/traversal prefixes are
 * stripped.  Returns NULL for empty input.  Does not allocate.
 */
static char * php_zip_make_relative_path(char *path, int path_len) /* {{{ */
{
	char *path_begin = path;
	size_t i;

	if (path_len < 1 || path == NULL) {
		return NULL;
	}

	if (IS_SLASH(path[0])) {
		return path + 1;
	}

	i = path_len;

	while (1) {
		/* Scan backwards to the previous slash. */
		while (i > 0 && !IS_SLASH(path[i])) {
			i--;
		}

		if (!i) {
			return path;
		}

		if (i >= 2 && (path[i -1] == '.' || path[i -1] == ':')) {
			/* i is the position of . or :, add 1 for / */
			path_begin = path + i + 1;
			break;
		}
		i--;
	}

	return path_begin;
}
/* }}} */

#ifdef PHP_ZIP_USE_OO
/* {{{ php_zip_extract_file */
/*
 * Extract one archive entry @file from @za under destination directory @dest.
 * Normalizes the entry name, creates intermediate directories, enforces
 * open_basedir on both the directory and the final path, then streams the
 * entry contents out.  Returns 1 on success, 0 on any failure.
 */
static int php_zip_extract_file(struct zip * za, char *dest, char *file, int file_len TSRMLS_DC)
{
	php_stream_statbuf ssb;
	struct zip_file *zf;
	struct zip_stat sb;
	char b[8192];
	int n, len, ret;
	php_stream *stream;
	char *fullpath;
	char *file_dirname_fullpath;
	char file_dirname[MAXPATHLEN];
	size_t dir_len;
	char *file_basename;
	size_t file_basename_len;
	int is_dir_only = 0;
	char *path_cleaned;
	size_t path_cleaned_len;
	cwd_state new_state;

	/* new_state.cwd is malloc()ed (not emalloc) and must be free()d on all paths. */
	new_state.cwd = (char*)malloc(1);
	new_state.cwd[0] = '\0';
	new_state.cwd_length = 0;

	/* Clean/normlize the path and then transform any path (absolute or relative)
	 to a path relative to cwd (../../mydir/foo.txt > mydir/foo.txt)
	 */
	virtual_file_ex(&new_state, file, NULL, CWD_EXPAND TSRMLS_CC);
	path_cleaned =  php_zip_make_relative_path(new_state.cwd, new_state.cwd_length);
	if(!path_cleaned) {
		/* NOTE(review): new_state.cwd is not freed on this early return — possible leak. */
		return 0;
	}
	path_cleaned_len = strlen(path_cleaned);

	if (path_cleaned_len >= MAXPATHLEN || zip_stat(za, file, 0, &sb) != 0) {
		return 0;
	}

	/* it is a directory only, see #40228 */
	if (path_cleaned_len > 1 && IS_SLASH(path_cleaned[path_cleaned_len - 1])) {
		len = spprintf(&file_dirname_fullpath, 0, "%s/%s", dest, path_cleaned);
		is_dir_only = 1;
	} else {
		memcpy(file_dirname, path_cleaned, path_cleaned_len);
		dir_len = php_dirname(file_dirname, path_cleaned_len);

		if (dir_len <= 0 || (dir_len == 1 && file_dirname[0] == '.')) {
			len = spprintf(&file_dirname_fullpath, 0, "%s", dest);
		} else {
			len = spprintf(&file_dirname_fullpath, 0, "%s/%s", dest, file_dirname);
		}

		php_basename(path_cleaned, path_cleaned_len, NULL, 0, &file_basename, (size_t *)&file_basename_len TSRMLS_CC);

		if (ZIP_OPENBASEDIR_CHECKPATH(file_dirname_fullpath)) {
			efree(file_dirname_fullpath);
			efree(file_basename);
			free(new_state.cwd);
			return 0;
		}
	}

	/* let see if the path already exists */
	if (php_stream_stat_path_ex(file_dirname_fullpath, PHP_STREAM_URL_STAT_QUIET, &ssb, NULL) < 0) {
#if defined(PHP_WIN32) && (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION == 1)
		/* Old Win32 builds need forward slashes rewritten before mkdir. */
		char *e;
		e = file_dirname_fullpath;
		while (*e) {
			if (*e == '/') {
				*e = DEFAULT_SLASH;
			}
			e++;
		}
#endif
		ret = php_stream_mkdir(file_dirname_fullpath, 0777, PHP_STREAM_MKDIR_RECURSIVE|REPORT_ERRORS, NULL);
		if (!ret) {
			efree(file_dirname_fullpath);
			if (!is_dir_only) {
				efree(file_basename);
				free(new_state.cwd);
			}
			return 0;
		}
	}

	/* it is a standalone directory, job done */
	if (is_dir_only) {
		efree(file_dirname_fullpath);
		free(new_state.cwd);
		return 1;
	}

	len = spprintf(&fullpath, 0, "%s/%s", file_dirname_fullpath, file_basename);
	if (!len) {
		efree(file_dirname_fullpath);
		efree(file_basename);
		free(new_state.cwd);
		return 0;
	} else if (len > MAXPATHLEN) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Full extraction path exceed MAXPATHLEN (%i)", MAXPATHLEN);
		efree(file_dirname_fullpath);
		efree(file_basename);
		free(new_state.cwd);
		return 0;
	}

	/* check again the full path, not sure if it
	 * is required, does a file can have a different
	 * safemode status as its parent folder?
	 */
	if (ZIP_OPENBASEDIR_CHECKPATH(fullpath)) {
		efree(fullpath);
		efree(file_dirname_fullpath);
		efree(file_basename);
		free(new_state.cwd);
		return 0;
	}

#if PHP_API_VERSION < 20100412
	stream = php_stream_open_wrapper(fullpath, "w+b", REPORT_ERRORS|ENFORCE_SAFE_MODE, NULL);
#else
	stream = php_stream_open_wrapper(fullpath, "w+b", REPORT_ERRORS, NULL);
#endif

	if (stream == NULL) {
		n = -1;
		goto done;
	}

	zf = zip_fopen(za, file, 0);
	if (zf == NULL) {
		n = -1;
		php_stream_close(stream);
		goto done;
	}

	/* Copy the entry data in 8 KiB chunks. */
	n = 0;
	while ((n=zip_fread(zf, b, sizeof(b))) > 0) {
		php_stream_write(stream, b, n);
	}

	php_stream_close(stream);
	n = zip_fclose(zf);

done:
	efree(fullpath);
	efree(file_basename);
	efree(file_dirname_fullpath);
	free(new_state.cwd);

	if (n<0) {
		return 0;
	} else {
		return 1;
	}
}
/* }}} */

/*
 * Add a local file @filename to archive @za under entry name @entry_name,
 * replacing any existing entry of that name.  Returns 1 on success, -1 on
 * failure (open_basedir violation, missing file, or libzip error).
 */
static int php_zip_add_file(struct zip *za, const char *filename, size_t filename_len,
	char *entry_name, size_t entry_name_len, long offset_start, long offset_len TSRMLS_DC) /* {{{ */
{
	struct zip_source *zs;
	int cur_idx;
	char resolved_path[MAXPATHLEN];
	zval exists_flag;


	if (ZIP_OPENBASEDIR_CHECKPATH(filename)) {
		return -1;
	}

	if (!expand_filepath(filename, resolved_path TSRMLS_CC)) {
		return -1;
	}

	php_stat(resolved_path, strlen(resolved_path), FS_EXISTS, &exists_flag TSRMLS_CC);
	if (!Z_BVAL(exists_flag)) {
		return -1;
	}

	zs = zip_source_file(za, resolved_path, offset_start, offset_len);
	if (!zs) {
		return -1;
	}

	cur_idx = zip_name_locate(za, (const char *)entry_name, 0);
	/* TODO: fix  _zip_replace */
	if (cur_idx<0) {
		/* reset the error */
		if (za->error.str) {
			_zip_error_fini(&za->error);
		}
		_zip_error_init(&za->error);
	} else {
		/* Entry exists: delete it first so zip_add() below replaces it. */
		if (zip_delete(za, cur_idx) == -1) {
			zip_source_free(zs);
			return -1;
		}
	}

	if (zip_add(za, entry_name, zs) == -1) {
		return -1;
	} else {
		return 1;
	}
}
/* }}} */

/*
 * Parse the options array accepted by addGlob()/addPattern(): validates and
 * extracts "remove_all_path", "remove_path" and "add_path".  Returns 1 on
 * success, -1 on invalid option.  The returned string pointers alias the
 * option zvals — caller must not free them.
 */
static int php_zip_parse_options(zval *options, long *remove_all_path,
	char **remove_path, int *remove_path_len, char **add_path, int *add_path_len TSRMLS_DC) /* {{{ */
{
	zval **option;
	if (zend_hash_find(HASH_OF(options), "remove_all_path", sizeof("remove_all_path"), (void **)&option) == SUCCESS) {
		long opt;
		if (Z_TYPE_PP(option) != IS_LONG) {
			/* Coerce on a copy so the caller's zval is untouched. */
			zval tmp = **option;
			zval_copy_ctor(&tmp);
			convert_to_long(&tmp);
			opt = Z_LVAL(tmp);
		} else {
			opt = Z_LVAL_PP(option);
		}
		*remove_all_path = opt;
	}

	/* If I add more options, it would make sense to create a nice static struct and loop over it. */
	if (zend_hash_find(HASH_OF(options), "remove_path", sizeof("remove_path"), (void **)&option) == SUCCESS) {
		if (Z_TYPE_PP(option) != IS_STRING) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "remove_path option expected to be a string");
			return -1;
		}

		if (Z_STRLEN_PP(option) < 1) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string given as remove_path option");
			return -1;
		}

		if (Z_STRLEN_PP(option) >= MAXPATHLEN) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "remove_path string is too long (max: %i, %i given)",
						MAXPATHLEN - 1, Z_STRLEN_PP(option));
			return -1;
		}
		*remove_path_len = Z_STRLEN_PP(option);
		*remove_path = Z_STRVAL_PP(option);
	}

	if (zend_hash_find(HASH_OF(options), "add_path", sizeof("add_path"), (void **)&option) == SUCCESS) {
		if (Z_TYPE_PP(option) != IS_STRING) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "add_path option expected to be a string");
			return -1;
		}

		if (Z_STRLEN_PP(option) < 1) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string given as the add_path option");
			return -1;
		}

		if (Z_STRLEN_PP(option) >= MAXPATHLEN) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "add_path string too long (max: %i, %i given)",
						MAXPATHLEN - 1, Z_STRLEN_PP(option));
			return -1;
		}
		*add_path_len = Z_STRLEN_PP(option);
		*add_path = Z_STRVAL_PP(option);
	}
	return 1;
}
/* }}} */

/* {{{ REGISTER_ZIP_CLASS_CONST_LONG */
#define REGISTER_ZIP_CLASS_CONST_LONG(const_name, value) \
	zend_declare_class_constant_long(zip_class_entry, const_name, sizeof(const_name)-1, (long)value TSRMLS_CC);
/* }}} */

/* {{{ ZIP_FROM_OBJECT */
/* Fetch the struct zip* from a ZipArchive object; RETURN_FALSE if not opened. */
#define ZIP_FROM_OBJECT(intern, object) \
	{ \
		ze_zip_object *obj = (ze_zip_object*) zend_object_store_get_object(object TSRMLS_CC); \
		intern = obj->za; \
		if (!intern) { \
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid or uninitialized Zip object"); \
			RETURN_FALSE; \
		} \
	}
/* }}} */

/* {{{ RETURN_SB(sb) */
/* Build the statName()/statIndex() return array from a struct zip_stat. */
#define RETURN_SB(sb) \
	{ \
		array_init(return_value); \
		add_ascii_assoc_string(return_value, "name", (char *)(sb)->name, 1); \
		add_ascii_assoc_long(return_value, "index", (long) (sb)->index); \
		add_ascii_assoc_long(return_value, "crc", (long) (sb)->crc); \
		add_ascii_assoc_long(return_value, "size", (long) (sb)->size); \
		add_ascii_assoc_long(return_value, "mtime", (long) (sb)->mtime); \
		add_ascii_assoc_long(return_value, "comp_size", (long) (sb)->comp_size); \
		add_ascii_assoc_long(return_value, "comp_method", (long) (sb)->comp_method); \
	}
/* }}} */

/* Property reader: libzip error code of the archive. */
static int php_zip_status(struct zip *za TSRMLS_DC) /* {{{ */
{
	int zep, syp;

	zip_error_get(za, &zep, &syp);
	return zep;
}
/* }}} */

/* Property reader: system (errno-style) error code of the archive. */
static int php_zip_status_sys(struct zip *za TSRMLS_DC) /* {{{ */
{
	int zep, syp;

	zip_error_get(za, &zep, &syp);
	return syp;
}
/* }}} */

/* Property reader: number of entries in the archive. */
static int php_zip_get_num_files(struct zip *za TSRMLS_DC) /* {{{ */
{
	return zip_get_num_files(za);
}
/* }}} */

/* Property reader: the filename the archive was opened with, or NULL. */
static char * php_zipobj_get_filename(ze_zip_object *obj TSRMLS_DC) /* {{{ */
{
	if (!obj) {
		return NULL;
	}

	if (obj->filename) {
		return obj->filename;
	}
	return NULL;
}
/* }}} */

/* Property reader: archive-level comment; length stored through @len. */
static char * php_zipobj_get_zip_comment(struct zip *za, int *len TSRMLS_DC) /* {{{ */
{
	if (za) {
		return (char *)zip_get_archive_comment(za, len, 0);
	}
	return NULL;
}
/* }}} */

#ifdef HAVE_GLOB /* {{{ */
/* Define any glob() flags this platform lacks to 0 so they become no-ops. */
#ifndef GLOB_ONLYDIR
#define GLOB_ONLYDIR (1<<30)
#define GLOB_EMULATE_ONLYDIR
#define GLOB_FLAGMASK (~GLOB_ONLYDIR)
#else
#define GLOB_FLAGMASK (~0)
#endif
#ifndef GLOB_BRACE
# define GLOB_BRACE 0
#endif
#ifndef GLOB_MARK
# define GLOB_MARK 0
#endif
#ifndef GLOB_NOSORT
# define GLOB_NOSORT 0
#endif
#ifndef GLOB_NOCHECK
# define GLOB_NOCHECK 0
#endif
#ifndef GLOB_NOESCAPE
# define GLOB_NOESCAPE 0
#endif
#ifndef GLOB_ERR
# define GLOB_ERR 0
#endif
/* This is used for checking validity of passed flags (passing invalid flags causes segfault in glob()!! */
#define GLOB_AVAILABLE_FLAGS (0 | GLOB_BRACE | GLOB_MARK | GLOB_NOSORT | GLOB_NOCHECK | GLOB_NOESCAPE | GLOB_ERR | GLOB_ONLYDIR)
#endif /* }}} */

/*
 * Expand @pattern with glob(3) and fill @return_value (a PHP array) with the
 * matched paths.  Returns the match count, 0 on no match / no glob support,
 * or -1 on error (pattern too long, invalid flags, open_basedir violation).
 */
int php_zip_glob(char *pattern, int pattern_len, long flags, zval *return_value TSRMLS_DC) /* {{{ */
{
#ifdef HAVE_GLOB
	char cwd[MAXPATHLEN];
	int cwd_skip = 0;
#ifdef ZTS
	char work_pattern[MAXPATHLEN];
	char *result;
#endif
	glob_t globbuf;
	int n;
	int ret;

	if (pattern_len >= MAXPATHLEN) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Pattern exceeds the maximum allowed length of %d characters", MAXPATHLEN);
		return -1;
	}

	if ((GLOB_AVAILABLE_FLAGS & flags) != flags) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "At least one of the passed flags is invalid or not supported on this platform");
		return -1;
	}

#ifdef ZTS
	/* ZTS builds have per-thread virtual cwd: make relative patterns absolute. */
	if (!IS_ABSOLUTE_PATH(pattern, pattern_len)) {
		result = VCWD_GETCWD(cwd, MAXPATHLEN);
		if (!result) {
			cwd[0] = '\0';
		}
#ifdef PHP_WIN32
		if (IS_SLASH(*pattern)) {
			cwd[2] = '\0';
		}
#endif
		cwd_skip = strlen(cwd)+1;

		snprintf(work_pattern, MAXPATHLEN, "%s%c%s", cwd, DEFAULT_SLASH, pattern);
		pattern = work_pattern;
	}
#endif

	globbuf.gl_offs = 0;
	if (0 != (ret = glob(pattern, flags & GLOB_FLAGMASK, NULL, &globbuf))) {
#ifdef GLOB_NOMATCH
		if (GLOB_NOMATCH == ret) {
			/* Some glob implementation simply return no data if no matches
			   were found, others return the GLOB_NOMATCH error code.
			   We don't want to treat GLOB_NOMATCH as an error condition
			   so that PHP glob() behaves the same on both types of
			   implementations and so that 'foreach (glob() as ...'
			   can be used for simple glob() calls without further error
			   checking.
			*/
			array_init(return_value);
			return 0;
		}
#endif
		return 0;
	}

	/* now catch the FreeBSD style of "no matches" */
	if (!globbuf.gl_pathc || !globbuf.gl_pathv) {
		array_init(return_value);
		return 0;
	}

	/* we assume that any glob pattern will match files from one directory only
	   so checking the dirname of the first match should be sufficient */
	strncpy(cwd, globbuf.gl_pathv[0], MAXPATHLEN);
	if (ZIP_OPENBASEDIR_CHECKPATH(cwd)) {
		return -1;
	}

	array_init(return_value);
	for (n = 0; n < globbuf.gl_pathc; n++) {
		/* we need to do this everytime since GLOB_ONLYDIR does not guarantee that
		 * all directories will be filtered. GNU libc documentation states the
		 * following:
		 * If the information about the type of the file is easily available
		 * non-directories will be rejected but no extra work will be done to
		 * determine the information for each file.  I.e., the caller must still be
		 * able to filter directories out.
		 */
		if (flags & GLOB_ONLYDIR) {
			struct stat s;

			if (0 != VCWD_STAT(globbuf.gl_pathv[n], &s)) {
				continue;
			}

			if (S_IFDIR != (s.st_mode & S_IFMT)) {
				continue;
			}
		}
		/* cwd_skip strips the cwd prefix added above so results stay relative. */
		add_next_index_string(return_value, globbuf.gl_pathv[n]+cwd_skip, 1);
	}

	globfree(&globbuf);
	return globbuf.gl_pathc;
#else
	php_error_docref(NULL TSRMLS_CC, E_ERROR, "Glob support is not available");
	return 0;
#endif  /* HAVE_GLOB */
}
/* }}} */

/*
 * Scan directory @path and fill @return_value (a PHP array) with full paths
 * of regular files whose basename matches PCRE @regexp.  Returns the scandir
 * entry count, or -1 on error (bad regex, open_basedir violation).
 */
int php_zip_pcre(char *regexp, int regexp_len, char *path, int path_len, zval *return_value TSRMLS_DC) /* {{{ */
{
#ifdef ZTS
	char cwd[MAXPATHLEN];
	int cwd_skip = 0;
	char work_path[MAXPATHLEN];
	char *result;
#endif
	int files_cnt;
	char **namelist;

#ifdef ZTS
	/* Same virtual-cwd absolutization as in php_zip_glob() above. */
	if (!IS_ABSOLUTE_PATH(path, path_len)) {
		result = VCWD_GETCWD(cwd, MAXPATHLEN);
		if (!result) {
			cwd[0] = '\0';
		}
#ifdef PHP_WIN32
		if (IS_SLASH(*path)) {
			cwd[2] = '\0';
		}
#endif
		cwd_skip = strlen(cwd)+1;

		snprintf(work_path, MAXPATHLEN, "%s%c%s", cwd, DEFAULT_SLASH, path);
		path = work_path;
	}
#endif

	if (ZIP_OPENBASEDIR_CHECKPATH(path)) {
		return -1;
	}

	files_cnt = php_stream_scandir(path, &namelist, NULL, (void *) php_stream_dirent_alphasort);

	if (files_cnt > 0) {
		pcre       *re = NULL;
		pcre_extra *pcre_extra = NULL;
		int preg_options = 0, i;

		re = pcre_get_compiled_regex(regexp, &pcre_extra, &preg_options TSRMLS_CC);
		if (!re) {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid expression");
			return -1;
		}

		array_init(return_value);

		/* only the files, directories are ignored */
		for (i = 0; i < files_cnt; i++) {
			struct stat s;
			char   fullpath[MAXPATHLEN];
			int    ovector[3];
			int    matches;
			int    namelist_len = strlen(namelist[i]);


			/* Skip "." and "..". */
			if ((namelist_len == 1 && namelist[i][0] == '.') ||
				(namelist_len == 2 && namelist[i][0] == '.' && namelist[i][1] == '.')) {
				efree(namelist[i]);
				continue;
			}

			if ((path_len + namelist_len + 1) >= MAXPATHLEN) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "add_path string too long (max: %i, %i given)",
						MAXPATHLEN - 1, (path_len + namelist_len + 1));
				efree(namelist[i]);
				break;
			}

			snprintf(fullpath, MAXPATHLEN, "%s%c%s", path, DEFAULT_SLASH, namelist[i]);

			if (0 != VCWD_STAT(fullpath, &s)) {
				php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot read <%s>", fullpath);
				efree(namelist[i]);
				continue;
			}

			if (S_IFDIR == (s.st_mode & S_IFMT)) {
				efree(namelist[i]);
				continue;
			}

			matches = pcre_exec(re, NULL, namelist[i], strlen(namelist[i]), 0, 0, ovector, 3);
			/* 0 means that the vector is too small to hold all the captured substring offsets */
			if (matches < 0) {
				efree(namelist[i]);
				continue;
			}

			add_next_index_string(return_value, fullpath, 1);
			efree(namelist[i]);
		}
		efree(namelist);
	}
	return files_cnt;
}
/* }}} */
#endif

/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_open, 0, 0, 1)
	ZEND_ARG_INFO(0, filename)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_close, 0, 0, 1)
	ZEND_ARG_INFO(0, zip)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_read, 0, 0, 1)
	ZEND_ARG_INFO(0, zip)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_open, 0, 0, 2)
	ZEND_ARG_INFO(0, zip_dp)
	ZEND_ARG_INFO(0, zip_entry)
	ZEND_ARG_INFO(0, mode)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_close, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_ent)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_read, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_entry)
	ZEND_ARG_INFO(0, len)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_name, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_compressedsize, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_filesize, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_zip_entry_compressionmethod, 0, 0, 1)
	ZEND_ARG_INFO(0, zip_entry)
ZEND_END_ARG_INFO()
/* }}} */

/* {{{ zend_function_entry */
/*
 * Procedural API function table.  zip_open/zip_close/zip_read use
 * ZEND_RAW_FENTRY because "zip_open" collides with libzip's zip_open macro,
 * so the C handlers are the renamed zif_zip_* functions.
 */
static const zend_function_entry zip_functions[] = {
	ZEND_RAW_FENTRY("zip_open", zif_zip_open, arginfo_zip_open, 0)
	ZEND_RAW_FENTRY("zip_close", zif_zip_close, arginfo_zip_close, 0)
	ZEND_RAW_FENTRY("zip_read", zif_zip_read, arginfo_zip_read, 0)
	PHP_FE(zip_entry_open,		arginfo_zip_entry_open)
	PHP_FE(zip_entry_close,		arginfo_zip_entry_close)
	PHP_FE(zip_entry_read,		arginfo_zip_entry_read)
	PHP_FE(zip_entry_filesize,	arginfo_zip_entry_filesize)
	PHP_FE(zip_entry_name,		arginfo_zip_entry_name)
	PHP_FE(zip_entry_compressedsize,	arginfo_zip_entry_compressedsize)
	PHP_FE(zip_entry_compressionmethod,	arginfo_zip_entry_compressionmethod)
	PHP_FE_END
};
/* }}} */

/* {{{ ZE2 OO definitions */
#ifdef PHP_ZIP_USE_OO
static zend_class_entry *zip_class_entry;
static zend_object_handlers zip_object_handlers;

/* Maps ZipArchive property names to their reader callbacks (table below). */
static HashTable zip_prop_handlers;

/* Reader callback signatures for the three property kinds. */
typedef int (*zip_read_int_t)(struct zip *za TSRMLS_DC);
typedef char *(*zip_read_const_char_t)(struct zip *za, int *len TSRMLS_DC);
typedef char *(*zip_read_const_char_from_ze_t)(ze_zip_object *obj TSRMLS_DC);

/* One virtual-property descriptor: at most one reader is set; type is the
 * zval type (IS_LONG/IS_STRING/IS_BOOL) the property materializes as. */
typedef struct _zip_prop_handler {
	zip_read_int_t read_int_func;
	zip_read_const_char_t read_const_char_func;
	zip_read_const_char_from_ze_t read_const_char_from_obj_func;

	int type;
} zip_prop_handler;
#endif
/* }}} */

#ifdef PHP_ZIP_USE_OO
static void php_zip_register_prop_handler(HashTable *prop_handler, char *name, zip_read_int_t read_int_func, zip_read_const_char_t read_char_func, zip_read_const_char_from_ze_t read_char_from_obj_func, int rettype TSRMLS_DC) /* {{{ */ { zip_prop_handler hnd; hnd.read_const_char_func = read_char_func; hnd.read_int_func = read_int_func; hnd.read_const_char_from_obj_func = read_char_from_obj_func; hnd.type = rettype; zend_hash_add(prop_handler, name, strlen(name)+1, &hnd, sizeof(zip_prop_handler), NULL); } /* }}} */ static int php_zip_property_reader(ze_zip_object *obj, zip_prop_handler *hnd, zval **retval, int newzval TSRMLS_DC) /* {{{ */ { const char *retchar = NULL; int retint = 0; int len = 0; if (obj && obj->za != NULL) { if (hnd->read_const_char_func) { retchar = hnd->read_const_char_func(obj->za, &len TSRMLS_CC); } else { if (hnd->read_int_func) { retint = hnd->read_int_func(obj->za TSRMLS_CC); if (retint == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Internal zip error returned"); return FAILURE; } } else { if (hnd->read_const_char_from_obj_func) { retchar = hnd->read_const_char_from_obj_func(obj TSRMLS_CC); len = strlen(retchar); } } } } if (newzval) { ALLOC_ZVAL(*retval); } switch (hnd->type) { case IS_STRING: if (retchar) { ZVAL_STRINGL(*retval, (char *) retchar, len, 1); } else { ZVAL_EMPTY_STRING(*retval); } break; case IS_BOOL: ZVAL_BOOL(*retval, (long)retint); break; case IS_LONG: ZVAL_LONG(*retval, (long)retint); break; default: ZVAL_NULL(*retval); } return SUCCESS; } /* }}} */ static zval **php_zip_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { ze_zip_object *obj; zval tmp_member; zval **retval = NULL; zip_prop_handler *hnd; zend_object_handlers *std_hnd; int ret; if (member->type != IS_STRING) { tmp_member = *member; zval_copy_ctor(&tmp_member); convert_to_string(&tmp_member); member = &tmp_member; key = NULL; } ret = FAILURE; obj = (ze_zip_object *)zend_objects_get_address(object 
TSRMLS_CC); if (obj->prop_handler != NULL) { if (key) { ret = zend_hash_quick_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, key->hash_value, (void **) &hnd); } else { ret = zend_hash_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, (void **) &hnd); } } if (ret == FAILURE) { std_hnd = zend_get_std_object_handlers(); retval = std_hnd->get_property_ptr_ptr(object, member, type, key TSRMLS_CC); } if (member == &tmp_member) { zval_dtor(member); } return retval; } /* }}} */ static zval* php_zip_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { ze_zip_object *obj; zval tmp_member; zval *retval; zip_prop_handler *hnd; zend_object_handlers *std_hnd; int ret; if (member->type != IS_STRING) { tmp_member = *member; zval_copy_ctor(&tmp_member); convert_to_string(&tmp_member); member = &tmp_member; key = NULL; } ret = FAILURE; obj = (ze_zip_object *)zend_objects_get_address(object TSRMLS_CC); if (obj->prop_handler != NULL) { if (key) { ret = zend_hash_quick_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, key->hash_value, (void **) &hnd); } else { ret = zend_hash_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, (void **) &hnd); } } if (ret == SUCCESS) { ret = php_zip_property_reader(obj, hnd, &retval, 1 TSRMLS_CC); if (ret == SUCCESS) { /* ensure we're creating a temporary variable */ Z_SET_REFCOUNT_P(retval, 0); } else { retval = EG(uninitialized_zval_ptr); } } else { std_hnd = zend_get_std_object_handlers(); retval = std_hnd->read_property(object, member, type, key TSRMLS_CC); } if (member == &tmp_member) { zval_dtor(member); } return retval; } /* }}} */ static int php_zip_has_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { ze_zip_object *obj; zval tmp_member; zip_prop_handler *hnd; zend_object_handlers *std_hnd; int ret, retval = 0; if (member->type != IS_STRING) { tmp_member = *member; 
zval_copy_ctor(&tmp_member); convert_to_string(&tmp_member); member = &tmp_member; key = NULL; } ret = FAILURE; obj = (ze_zip_object *)zend_objects_get_address(object TSRMLS_CC); if (obj->prop_handler != NULL) { if (key) { ret = zend_hash_quick_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, key->hash_value, (void **) &hnd); } else { ret = zend_hash_find(obj->prop_handler, Z_STRVAL_P(member), Z_STRLEN_P(member)+1, (void **) &hnd); } } if (ret == SUCCESS) { zval *tmp; ALLOC_INIT_ZVAL(tmp); if (type == 2) { retval = 1; } else if (php_zip_property_reader(obj, hnd, &tmp, 0 TSRMLS_CC) == SUCCESS) { Z_SET_REFCOUNT_P(tmp, 1); Z_UNSET_ISREF_P(tmp); if (type == 1) { retval = zend_is_true(tmp); } else if (type == 0) { retval = (Z_TYPE_P(tmp) != IS_NULL); } } zval_ptr_dtor(&tmp); } else { std_hnd = zend_get_std_object_handlers(); retval = std_hnd->has_property(object, member, type, key TSRMLS_CC); } if (member == &tmp_member) { zval_dtor(member); } return retval; } /* }}} */ static HashTable *php_zip_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */ { *gc_data = NULL; *gc_data_count = 0; return zend_std_get_properties(object TSRMLS_CC); } /* }}} */ static HashTable *php_zip_get_properties(zval *object TSRMLS_DC)/* {{{ */ { ze_zip_object *obj; zip_prop_handler *hnd; HashTable *props; zval *val; int ret; char *key; uint key_len; HashPosition pos; ulong num_key; obj = (ze_zip_object *)zend_objects_get_address(object TSRMLS_CC); props = zend_std_get_properties(object TSRMLS_CC); if (obj->prop_handler == NULL) { return NULL; } zend_hash_internal_pointer_reset_ex(obj->prop_handler, &pos); while (zend_hash_get_current_data_ex(obj->prop_handler, (void**)&hnd, &pos) == SUCCESS) { zend_hash_get_current_key_ex(obj->prop_handler, &key, &key_len, &num_key, 0, &pos); MAKE_STD_ZVAL(val); ret = php_zip_property_reader(obj, hnd, &val, 0 TSRMLS_CC); if (ret != SUCCESS) { val = EG(uninitialized_zval_ptr); } zend_hash_update(props, key, key_len, 
(void *)&val, sizeof(zval *), NULL); zend_hash_move_forward_ex(obj->prop_handler, &pos); } return props; } /* }}} */ static void php_zip_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { ze_zip_object * intern = (ze_zip_object *) object; int i; if (!intern) { return; } if (intern->za) { if (zip_close(intern->za) != 0) { _zip_free(intern->za); } intern->za = NULL; } if (intern->buffers_cnt>0) { for (i=0; i<intern->buffers_cnt; i++) { efree(intern->buffers[i]); } efree(intern->buffers); } intern->za = NULL; #if (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION == 1 && PHP_RELEASE_VERSION > 2) || (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION > 1) || (PHP_MAJOR_VERSION > 5) zend_object_std_dtor(&intern->zo TSRMLS_CC); #else if (intern->zo.guards) { zend_hash_destroy(intern->zo.guards); FREE_HASHTABLE(intern->zo.guards); } if (intern->zo.properties) { zend_hash_destroy(intern->zo.properties); FREE_HASHTABLE(intern->zo.properties); } #endif if (intern->filename) { efree(intern->filename); } efree(intern); } /* }}} */ static zend_object_value php_zip_object_new(zend_class_entry *class_type TSRMLS_DC) /* {{{ */ { ze_zip_object *intern; zend_object_value retval; intern = emalloc(sizeof(ze_zip_object)); memset(&intern->zo, 0, sizeof(zend_object)); intern->za = NULL; intern->buffers = NULL; intern->filename = NULL; intern->buffers_cnt = 0; intern->prop_handler = &zip_prop_handlers; #if ((PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION > 1) || (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION == 1 && PHP_RELEASE_VERSION > 2)) zend_object_std_init(&intern->zo, class_type TSRMLS_CC); #else ALLOC_HASHTABLE(intern->zo.properties); zend_hash_init(intern->zo.properties, 0, NULL, ZVAL_PTR_DTOR, 0); intern->zo.ce = class_type; #endif object_properties_init(&intern->zo, class_type); retval.handle = zend_objects_store_put(intern, NULL, (zend_objects_free_object_storage_t) php_zip_object_free_storage, NULL TSRMLS_CC); retval.handlers = (zend_object_handlers *) & zip_object_handlers; return 
retval;
}
/* }}} */
#endif

/* {{{ Resource dtors */

/* {{{ php_zip_free_dir
 * Destructor for the le_zip_dir resource (procedural zip_open() handle). */
static void php_zip_free_dir(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
	zip_rsrc * zip_int = (zip_rsrc *) rsrc->ptr;

	if (zip_int) {
		if (zip_int->za) {
			if (zip_close(zip_int->za) != 0) {
				_zip_free(zip_int->za);
			}
			zip_int->za = NULL;
		}
		efree(rsrc->ptr);
		rsrc->ptr = NULL;
	}
}
/* }}} */

/* {{{ php_zip_free_entry
 * Destructor for the le_zip_entry resource (procedural zip_read() handle). */
static void php_zip_free_entry(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
	zip_read_rsrc *zr_rsrc = (zip_read_rsrc *) rsrc->ptr;

	if (zr_rsrc) {
		if (zr_rsrc->zf) {
			if (zr_rsrc->zf->za) {
				zip_fclose(zr_rsrc->zf);
			} else {
				/* archive already gone: free the source and the struct directly */
				if (zr_rsrc->zf->src)
					zip_source_free(zr_rsrc->zf->src);
				free(zr_rsrc->zf);
			}
			zr_rsrc->zf = NULL;
		}
		efree(zr_rsrc);
		rsrc->ptr = NULL;
	}
}
/* }}} */

/* }}}*/

/* reset macro */

/* {{{ function prototypes */
static PHP_MINIT_FUNCTION(zip);
static PHP_MSHUTDOWN_FUNCTION(zip);
static PHP_MINFO_FUNCTION(zip);
/* }}} */

/* {{{ zip_module_entry */
zend_module_entry zip_module_entry = {
	STANDARD_MODULE_HEADER,
	"zip",
	zip_functions,
	PHP_MINIT(zip),
	PHP_MSHUTDOWN(zip),
	NULL,
	NULL,
	PHP_MINFO(zip),
	PHP_ZIP_VERSION_STRING,
	STANDARD_MODULE_PROPERTIES
};
/* }}} */

#ifdef COMPILE_DL_ZIP
ZEND_GET_MODULE(zip)
#endif
/* set macro */

/* {{{ proto resource zip_open(string filename)
Create new zip using source uri for output */
static PHP_NAMED_FUNCTION(zif_zip_open)
{
	char *filename;
	int filename_len;
	char resolved_path[MAXPATHLEN + 1];
	zip_rsrc *rsrc_int;
	int err = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &filename, &filename_len) == FAILURE) {
		return;
	}

	if (filename_len == 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string as source");
		RETURN_FALSE;
	}

	if (ZIP_OPENBASEDIR_CHECKPATH(filename)) {
		RETURN_FALSE;
	}

	if(!expand_filepath(filename, resolved_path TSRMLS_CC)) {
		RETURN_FALSE;
	}

	rsrc_int = (zip_rsrc *)emalloc(sizeof(zip_rsrc));

	rsrc_int->za = zip_open(resolved_path, 0, &err);
	if (rsrc_int->za == NULL) {
		/* on failure the libzip error code is returned to userland as a long */
		efree(rsrc_int);
		RETURN_LONG((long)err);
	}
	rsrc_int->index_current = 0;
	rsrc_int->num_files = zip_get_num_files(rsrc_int->za);

	ZEND_REGISTER_RESOURCE(return_value, rsrc_int, le_zip_dir);
}
/* }}} */

/* {{{ proto void zip_close(resource zip)
Close a Zip archive */
static PHP_NAMED_FUNCTION(zif_zip_close)
{
	zval * zip;
	zip_rsrc *z_rsrc = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &zip) == FAILURE) {
		return;
	}
	ZEND_FETCH_RESOURCE(z_rsrc, zip_rsrc *, &zip, -1, le_zip_dir_name, le_zip_dir);

	/* really close the zip will break BC :-D */
	zend_list_delete(Z_LVAL_P(zip));
}
/* }}} */

/* {{{ proto resource zip_read(resource zip)
Returns the next file in the archive */
static PHP_NAMED_FUNCTION(zif_zip_read)
{
	zval *zip_dp;
	zip_read_rsrc *zr_rsrc;
	int ret;
	zip_rsrc *rsrc_int;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &zip_dp) == FAILURE) {
		return;
	}
	ZEND_FETCH_RESOURCE(rsrc_int, zip_rsrc *, &zip_dp, -1, le_zip_dir_name, le_zip_dir);

	if (rsrc_int && rsrc_int->za) {
		/* iteration finished once the cursor passes the last entry */
		if (rsrc_int->index_current >= rsrc_int->num_files) {
			RETURN_FALSE;
		}

		zr_rsrc = emalloc(sizeof(zip_read_rsrc));

		ret = zip_stat_index(rsrc_int->za, rsrc_int->index_current, 0, &zr_rsrc->sb);

		if (ret != 0) {
			efree(zr_rsrc);
			RETURN_FALSE;
		}

		zr_rsrc->zf = zip_fopen_index(rsrc_int->za, rsrc_int->index_current, 0);
		if (zr_rsrc->zf) {
			/* only advance the cursor once the entry was opened successfully */
			rsrc_int->index_current++;
			ZEND_REGISTER_RESOURCE(return_value, zr_rsrc, le_zip_entry);
		} else {
			efree(zr_rsrc);
			RETURN_FALSE;
		}

	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto bool zip_entry_open(resource zip_dp, resource zip_entry [, string mode])
Open a Zip File, pointed by the resource entry */
/* Dummy function to follow the old API */
static PHP_NAMED_FUNCTION(zif_zip_entry_open)
{
	zval * zip;
	zval * zip_entry;
	char *mode = NULL;
	int mode_len = 0;
	zip_read_rsrc * zr_rsrc;
	zip_rsrc *z_rsrc;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr|s", &zip, &zip_entry, &mode, &mode_len) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(zr_rsrc, zip_read_rsrc *, &zip_entry, -1, le_zip_entry_name,
le_zip_entry);
	ZEND_FETCH_RESOURCE(z_rsrc, zip_rsrc *, &zip, -1, le_zip_dir_name, le_zip_dir);

	/* the entry was already opened by zip_read(); just report its state */
	if (zr_rsrc->zf != NULL) {
		RETURN_TRUE;
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto bool zip_entry_close(resource zip_ent)
Close a zip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_close)
{
	zval * zip_entry;
	zip_read_rsrc * zr_rsrc;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &zip_entry) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(zr_rsrc, zip_read_rsrc *, &zip_entry, -1, le_zip_entry_name, le_zip_entry);

	/* deleting the resource runs php_zip_free_entry() */
	RETURN_BOOL(SUCCESS == zend_list_delete(Z_LVAL_P(zip_entry)));
}
/* }}} */

/* {{{ proto mixed zip_entry_read(resource zip_entry [, int len])
Read from an open directory entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_read)
{
	zval * zip_entry;
	long len = 0;
	zip_read_rsrc * zr_rsrc;
	char *buffer;
	int n = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r|l", &zip_entry, &len) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(zr_rsrc, zip_read_rsrc *, &zip_entry, -1, le_zip_entry_name, le_zip_entry);

	/* default read size when omitted or non-positive */
	if (len <= 0) {
		len = 1024;
	}

	if (zr_rsrc->zf) {
		/* +1 for the NUL terminator; safe_emalloc guards the multiplication */
		buffer = safe_emalloc(len, 1, 1);
		n = zip_fread(zr_rsrc->zf, buffer, len);
		if (n > 0) {
			buffer[n] = 0;
			RETURN_STRINGL(buffer, n, 0);
		} else {
			efree(buffer);
			RETURN_EMPTY_STRING()
		}
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* Shared reader for the zip_entry_* accessors; opt selects the reported field:
 * 0 = name, 1 = compressed size, 2 = uncompressed size, 3 = method name. */
static void php_zip_entry_get_info(INTERNAL_FUNCTION_PARAMETERS, int opt) /* {{{ */
{
	zval * zip_entry;
	zip_read_rsrc * zr_rsrc;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "r", &zip_entry) == FAILURE) {
		return;
	}

	ZEND_FETCH_RESOURCE(zr_rsrc, zip_read_rsrc *, &zip_entry, -1, le_zip_entry_name, le_zip_entry);

	if (!zr_rsrc->zf) {
		RETURN_FALSE;
	}

	switch (opt) {
		case 0:
			RETURN_STRING((char *)zr_rsrc->sb.name, 1);
			break;
		case 1:
			RETURN_LONG((long) (zr_rsrc->sb.comp_size));
			break;
		case 2:
			RETURN_LONG((long) (zr_rsrc->sb.size));
			break;
		case 3:
			switch (zr_rsrc->sb.comp_method) {
				case 0:
					RETURN_STRING("stored", 1);
					break;
				case 1:
					RETURN_STRING("shrunk", 1);
					break;
				case 2:
				case 3:
				case 4:
				case 5:
					RETURN_STRING("reduced", 1);
					break;
				case 6:
					RETURN_STRING("imploded", 1);
					break;
				case 7:
					RETURN_STRING("tokenized", 1);
					break;
				case 8:
					RETURN_STRING("deflated", 1);
					break;
				case 9:
					RETURN_STRING("deflatedX", 1);
					break;
				case 10:
					RETURN_STRING("implodedX", 1);
					break;
				default:
					RETURN_FALSE;
			}
			/* NOTE(review): unreachable — every branch above returns */
			RETURN_LONG((long) (zr_rsrc->sb.comp_method));
			break;
	}
}
/* }}} */

/* {{{ proto string zip_entry_name(resource zip_entry)
Return the name given a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_name)
{
	php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */

/* {{{ proto int zip_entry_compressedsize(resource zip_entry)
Return the compressed size of a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_compressedsize)
{
	php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */

/* {{{ proto int zip_entry_filesize(resource zip_entry)
Return the actual filesize of a ZZip entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_filesize)
{
	php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2);
}
/* }}} */

/* {{{ proto string zip_entry_compressionmethod(resource zip_entry)
Return a string containing the compression method used on a particular entry */
static PHP_NAMED_FUNCTION(zif_zip_entry_compressionmethod)
{
	php_zip_entry_get_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
/* }}} */

#ifdef PHP_ZIP_USE_OO
/* {{{ proto mixed ZipArchive::open(string source [, int flags])
Create new zip using source uri for output, return TRUE on success or the error code */
static ZIPARCHIVE_METHOD(open)
{
	struct zip *intern;
	char *filename;
	int filename_len;
	int err = 0;
	long flags = 0;
	char resolved_path[MAXPATHLEN];

	zval *this = getThis();
	ze_zip_object *ze_obj = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|l", &filename, &filename_len, &flags) == FAILURE) {
		return;
	}

	if (this) {
		/* We do not use ZIP_FROM_OBJECT, zip init function here */
		ze_obj = (ze_zip_object*) zend_object_store_get_object(this TSRMLS_CC);
	}

	if (filename_len == 0) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string as source");
		RETURN_FALSE;
	}

	if (ZIP_OPENBASEDIR_CHECKPATH(filename)) {
		RETURN_FALSE;
	}

	if (!expand_filepath(filename, resolved_path TSRMLS_CC)) {
		RETURN_FALSE;
	}

	if (ze_obj->za) {
		/* we already have an opened zip, free it */
		if (zip_close(ze_obj->za) != 0) {
			_zip_free(ze_obj->za);
		}
		ze_obj->za = NULL;
	}
	if (ze_obj->filename) {
		efree(ze_obj->filename);
		ze_obj->filename = NULL;
	}

	intern = zip_open(resolved_path, flags, &err);
	if (!intern || err) {
		/* failure: hand the libzip error code back to userland */
		RETURN_LONG((long)err);
	}
	ze_obj->filename = estrdup(resolved_path);
	ze_obj->filename_len = strlen(resolved_path);
	ze_obj->za = intern;
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool ZipArchive::close()
close the zip archive */
static ZIPARCHIVE_METHOD(close)
{
	struct zip *intern;
	zval *this = getThis();
	ze_zip_object *ze_obj;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	ze_obj = (ze_zip_object*) zend_object_store_get_object(this TSRMLS_CC);

	if (zip_close(intern)) {
		RETURN_FALSE;
	}

	efree(ze_obj->filename);
	ze_obj->filename = NULL;
	ze_obj->filename_len = 0;
	ze_obj->za = NULL;

	RETURN_TRUE;
}
/* }}} */

/* {{{ proto string ZipArchive::getStatusString()
 * Returns the status error message, system and/or zip messages */
static ZIPARCHIVE_METHOD(getStatusString)
{
	struct zip *intern;
	zval *this = getThis();
	int zep, syp, len;
	char error_string[128];

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	zip_error_get(intern, &zep, &syp);

	len = zip_error_to_str(error_string, 128, zep, syp);
	RETVAL_STRINGL(error_string, len, 1);
}
/* }}} */

/* {{{ proto bool ZipArchive::createEmptyDir(string dirname)
Returns the index of the entry named filename in the archive */
static ZIPARCHIVE_METHOD(addEmptyDir)
{
	struct zip *intern;
	zval *this = getThis();
	char *dirname;
	int dirname_len;
	int idx;
	struct zip_stat sb;
	char *s;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &dirname, &dirname_len) ==
FAILURE) { return; } if (dirname_len<1) { RETURN_FALSE; } if (dirname[dirname_len-1] != '/') { s=(char *)emalloc(dirname_len+2); strcpy(s, dirname); s[dirname_len] = '/'; s[dirname_len+1] = '\0'; } else { s = dirname; } idx = zip_stat(intern, s, 0, &sb); if (idx >= 0) { RETVAL_FALSE; } else { if (zip_add_dir(intern, (const char *)s) == -1) { RETVAL_FALSE; } RETVAL_TRUE; } if (s != dirname) { efree(s); } } /* }}} */ static void php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAMETERS, int type) /* {{{ */ { struct zip *intern; zval *this = getThis(); char *pattern; char *path = NULL; char *remove_path = NULL; char *add_path = NULL; int pattern_len, add_path_len = 0, remove_path_len = 0, path_len = 0; long remove_all_path = 0; long flags = 0; zval *options = NULL; int found; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); /* 1 == glob, 2==pcre */ if (type == 1) { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|la", &pattern, &pattern_len, &flags, &options) == FAILURE) { return; } } else { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sa", &pattern, &pattern_len, &path, &path_len, &options) == FAILURE) { return; } } if (pattern_len == 0) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as pattern"); RETURN_FALSE; } if (options && (php_zip_parse_options(options, &remove_all_path, &remove_path, &remove_path_len, &add_path, &add_path_len TSRMLS_CC) < 0)) { RETURN_FALSE; } if (remove_path && remove_path_len > 1 && (remove_path[strlen(remove_path) - 1] == '/' || remove_path[strlen(remove_path) - 1] == '\\')) { remove_path[strlen(remove_path) - 1] = '\0'; } if (type == 1) { found = php_zip_glob(pattern, pattern_len, flags, return_value TSRMLS_CC); } else { found = php_zip_pcre(pattern, pattern_len, path, path_len, return_value TSRMLS_CC); } if (found > 0) { int i; zval **zval_file = NULL; for (i = 0; i < found; i++) { char *file, *file_stripped, *entry_name; size_t entry_name_len, file_stripped_len; char entry_name_buf[MAXPATHLEN]; char 
			*basename = NULL;

			if (zend_hash_index_find(Z_ARRVAL_P(return_value), i, (void **) &zval_file) == SUCCESS) {
				file = Z_STRVAL_PP(zval_file);
				if (remove_all_path) {
					/* keep only the basename of each matched file */
					php_basename(Z_STRVAL_PP(zval_file), Z_STRLEN_PP(zval_file), NULL, 0, &basename, (size_t *)&file_stripped_len TSRMLS_CC);
					file_stripped = basename;
				} else if (remove_path && strstr(Z_STRVAL_PP(zval_file), remove_path) != NULL) {
					/* NOTE(review): strstr() only checks containment, yet the code
					 * strips remove_path as if it were a leading prefix — TODO confirm */
					file_stripped = Z_STRVAL_PP(zval_file) + remove_path_len + 1;
					file_stripped_len = Z_STRLEN_PP(zval_file) - remove_path_len - 1;
				} else {
					file_stripped = Z_STRVAL_PP(zval_file);
					file_stripped_len = Z_STRLEN_PP(zval_file);
				}

				if (add_path) {
					if ((add_path_len + file_stripped_len) > MAXPATHLEN) {
						php_error_docref(NULL TSRMLS_CC, E_WARNING, "Entry name too long (max: %d, %ld given)",
						MAXPATHLEN - 1, (add_path_len + file_stripped_len));
						zval_dtor(return_value);
						RETURN_FALSE;
					}

					snprintf(entry_name_buf, MAXPATHLEN, "%s%s", add_path, file_stripped);
					entry_name = entry_name_buf;
					entry_name_len = strlen(entry_name);
				} else {
					entry_name = Z_STRVAL_PP(zval_file);
					entry_name_len = Z_STRLEN_PP(zval_file);
				}
				if (basename) {
					efree(basename);
					basename = NULL;
				}
				if (php_zip_add_file(intern, Z_STRVAL_PP(zval_file), Z_STRLEN_PP(zval_file),
					entry_name, entry_name_len, 0, 0 TSRMLS_CC) < 0) {
					zval_dtor(return_value);
					RETURN_FALSE;
				}
			}
		}
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::addGlob(string pattern[,int flags [, array options]])
Add files matching the glob pattern. See php's glob for the pattern syntax. */
static ZIPARCHIVE_METHOD(addGlob)
{
	php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1);
}
/* }}} */

/* {{{ proto bool ZipArchive::addPattern(string pattern[, string path [, array options]])
Add files matching the pcre pattern. See php's pcre for the pattern syntax.
 */
static ZIPARCHIVE_METHOD(addPattern)
{
	php_zip_add_from_pattern(INTERNAL_FUNCTION_PARAM_PASSTHRU, 2);
}
/* }}} */

/* {{{ proto bool ZipArchive::addFile(string filepath[, string entryname[, int start [, int length]]])
Add a file in a Zip archive using its path and the name to use. */
static ZIPARCHIVE_METHOD(addFile)
{
	struct zip *intern;
	zval *this = getThis();
	char *filename;
	int filename_len;
	char *entry_name = NULL;
	int entry_name_len = 0;
	long offset_start = 0, offset_len = 0;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sll", &filename, &filename_len, &entry_name, &entry_name_len, &offset_start, &offset_len) == FAILURE) {
		return;
	}

	if (filename_len == 0) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as filename");
		RETURN_FALSE;
	}

	if (entry_name_len == 0) {
		entry_name = filename;
		entry_name_len = filename_len;
	}

	/* NOTE(review): offset_start/offset_len are parsed but not forwarded (0, 0) */
	if (php_zip_add_file(intern, filename, filename_len, entry_name, entry_name_len, 0, 0 TSRMLS_CC) < 0) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::addFromString(string name, string content)
Add a file using content and the entry name */
static ZIPARCHIVE_METHOD(addFromString)
{
	struct zip *intern;
	zval *this = getThis();
	char *buffer, *name;
	int buffer_len, name_len;
	ze_zip_object *ze_obj;
	struct zip_source *zs;
	int pos = 0;
	int cur_idx;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss", &name, &name_len, &buffer, &buffer_len) == FAILURE) {
		return;
	}

	ze_obj = (ze_zip_object*) zend_object_store_get_object(this TSRMLS_CC);
	/* the copied buffer must outlive the zip_source, so it is kept on the object
	 * and freed in php_zip_object_free_storage() */
	if (ze_obj->buffers_cnt) {
		ze_obj->buffers = (char **)erealloc(ze_obj->buffers, sizeof(char *) * (ze_obj->buffers_cnt+1));
		pos = ze_obj->buffers_cnt++;
	} else {
		ze_obj->buffers = (char **)emalloc(sizeof(char *));
		ze_obj->buffers_cnt++;
		pos = 0;
	}
	ze_obj->buffers[pos] = (char *)emalloc(buffer_len + 1);
	memcpy(ze_obj->buffers[pos], buffer, buffer_len
+ 1);

	zs = zip_source_buffer(intern, ze_obj->buffers[pos], buffer_len, 0);
	if (zs == NULL) {
		RETURN_FALSE;
	}

	cur_idx = zip_name_locate(intern, (const char *)name, 0);
	/* TODO: fix _zip_replace */
	if (cur_idx >= 0) {
		/* replace: drop the existing entry before adding the new one */
		if (zip_delete(intern, cur_idx) == -1) {
			goto fail;
		}
	}

	if (zip_add(intern, name, zs) != -1) {
		RETURN_TRUE;
	}
fail:
	zip_source_free(zs);
	RETURN_FALSE;
}
/* }}} */

/* {{{ proto array ZipArchive::statName(string filename[, int flags])
Returns the information about a the zip entry filename */
static ZIPARCHIVE_METHOD(statName)
{
	struct zip *intern;
	zval *this = getThis();
	char *name;
	int name_len;
	long flags = 0;
	struct zip_stat sb;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|l", &name, &name_len, &flags) == FAILURE) {
		return;
	}

	PHP_ZIP_STAT_PATH(intern, name, name_len, flags, sb);

	RETURN_SB(&sb);
}
/* }}} */

/* {{{ proto resource ZipArchive::statIndex(int index[, int flags])
Returns the zip entry informations using its index */
static ZIPARCHIVE_METHOD(statIndex)
{
	struct zip *intern;
	zval *this = getThis();
	long index, flags = 0;
	struct zip_stat sb;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &index, &flags) == FAILURE) {
		return;
	}

	if (zip_stat_index(intern, index, flags, &sb) != 0) {
		RETURN_FALSE;
	}
	RETURN_SB(&sb);
}
/* }}} */

/* {{{ proto int ZipArchive::locateName(string filename[, int flags])
Returns the index of the entry named filename in the archive */
static ZIPARCHIVE_METHOD(locateName)
{
	struct zip *intern;
	zval *this = getThis();
	char *name;
	int name_len;
	long flags = 0;
	long idx = -1;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|l", &name, &name_len, &flags) == FAILURE) {
		return;
	}
	if (name_len<1) {
		RETURN_FALSE;
	}

	idx = (long)zip_name_locate(intern, (const char *)name, flags);

	if (idx >= 0) {
		RETURN_LONG(idx);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto string ZipArchive::getNameIndex(int index [, int flags])
Returns the name of the file at position index */
static ZIPARCHIVE_METHOD(getNameIndex)
{
	struct zip *intern;
	zval *this = getThis();
	const char *name;
	long flags = 0, index = 0;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &index, &flags) == FAILURE) {
		return;
	}

	name = zip_get_name(intern, (int) index, flags);

	if (name) {
		RETVAL_STRING((char *)name, 1);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::setArchiveComment(string comment)
Set or remove (NULL/'') the comment of the archive */
static ZIPARCHIVE_METHOD(setArchiveComment)
{
	struct zip *intern;
	zval *this = getThis();
	int comment_len;
	char * comment;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &comment, &comment_len) == FAILURE) {
		return;
	}
	if (zip_set_archive_comment(intern, (const char *)comment, (int)comment_len)) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto string ZipArchive::getArchiveComment([int flags])
Returns the comment of an entry using its index */
static ZIPARCHIVE_METHOD(getArchiveComment)
{
	struct zip *intern;
	zval *this = getThis();
	long flags = 0;
	const char * comment;
	int comment_len = 0;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &flags) == FAILURE) {
		return;
	}

	/* NULL means "no comment" — reported as FALSE */
	comment = zip_get_archive_comment(intern, &comment_len, (int)flags);
	if(comment==NULL) {
		RETURN_FALSE;
	}
	RETURN_STRINGL((char *)comment, (long)comment_len, 1);
}
/* }}} */

/* {{{ proto bool ZipArchive::setCommentName(string name, string comment)
Set or remove (NULL/'') the comment of an entry using its Name */
static ZIPARCHIVE_METHOD(setCommentName)
{
	struct zip *intern;
	zval *this = getThis();
	int comment_len, name_len;
	char * comment, *name;
	int idx;

	if (!this) {
RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss", &name, &name_len, &comment, &comment_len) == FAILURE) { return; } if (name_len < 1) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as entry name"); } idx = zip_name_locate(intern, name, 0); if (idx < 0) { RETURN_FALSE; } PHP_ZIP_SET_FILE_COMMENT(intern, idx, comment, comment_len); } /* }}} */ /* {{{ proto bool ZipArchive::setCommentIndex(int index, string comment) Set or remove (NULL/'') the comment of an entry using its index */ static ZIPARCHIVE_METHOD(setCommentIndex) { struct zip *intern; zval *this = getThis(); long index; int comment_len; char * comment; struct zip_stat sb; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ls", &index, &comment, &comment_len) == FAILURE) { return; } PHP_ZIP_STAT_INDEX(intern, index, 0, sb); PHP_ZIP_SET_FILE_COMMENT(intern, index, comment, comment_len); } /* }}} */ /* {{{ proto string ZipArchive::getCommentName(string name[, int flags]) Returns the comment of an entry using its name */ static ZIPARCHIVE_METHOD(getCommentName) { struct zip *intern; zval *this = getThis(); int name_len, idx; long flags = 0; int comment_len = 0; const char * comment; char *name; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &name, &name_len, &flags) == FAILURE) { return; } if (name_len < 1) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as entry name"); RETURN_FALSE; } idx = zip_name_locate(intern, name, 0); if (idx < 0) { RETURN_FALSE; } comment = zip_get_file_comment(intern, idx, &comment_len, (int)flags); RETURN_STRINGL((char *)comment, (long)comment_len, 1); } /* }}} */ /* {{{ proto string ZipArchive::getCommentIndex(int index[, int flags]) Returns the comment of an entry using its index */ static ZIPARCHIVE_METHOD(getCommentIndex) { struct zip *intern; zval *this = getThis(); 
long index, flags = 0; const char * comment; int comment_len = 0; struct zip_stat sb; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &index, &flags) == FAILURE) { return; } PHP_ZIP_STAT_INDEX(intern, index, 0, sb); comment = zip_get_file_comment(intern, index, &comment_len, (int)flags); RETURN_STRINGL((char *)comment, (long)comment_len, 1); } /* }}} */ /* {{{ proto bool ZipArchive::deleteIndex(int index) Delete a file using its index */ static ZIPARCHIVE_METHOD(deleteIndex) { struct zip *intern; zval *this = getThis(); long index; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &index) == FAILURE) { return; } if (index < 0) { RETURN_FALSE; } if (zip_delete(intern, index) < 0) { RETURN_FALSE; } RETURN_TRUE; } /* }}} */ /* {{{ proto bool ZipArchive::deleteName(string name) Delete a file using its index */ static ZIPARCHIVE_METHOD(deleteName) { struct zip *intern; zval *this = getThis(); int name_len; char *name; struct zip_stat sb; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name, &name_len) == FAILURE) { return; } if (name_len < 1) { RETURN_FALSE; } PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb); if (zip_delete(intern, sb.index)) { RETURN_FALSE; } RETURN_TRUE; } /* }}} */ /* {{{ proto bool ZipArchive::renameIndex(int index, string new_name) Rename an entry selected by its index to new_name */ static ZIPARCHIVE_METHOD(renameIndex) { struct zip *intern; zval *this = getThis(); char *new_name; int new_name_len; long index; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ls", &index, &new_name, &new_name_len) == FAILURE) { return; } if (index < 0) { RETURN_FALSE; } if (new_name_len < 1) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as new entry name"); RETURN_FALSE; } if 
 (zip_rename(intern, index, (const char *)new_name) != 0) {
		RETURN_FALSE;
	}
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool ZipArchive::renameName(string name, string new_name)
Rename an entry selected by its name to new_name */
static ZIPARCHIVE_METHOD(renameName)
{
	struct zip *intern;
	zval *this = getThis();
	struct zip_stat sb;
	char *name, *new_name;
	int name_len, new_name_len;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss", &name, &name_len, &new_name, &new_name_len) == FAILURE) {
		return;
	}

	if (new_name_len < 1) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string as new entry name");
		RETURN_FALSE;
	}

	/* resolves name -> index (returns FALSE if the entry does not exist) */
	PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb);

	if (zip_rename(intern, sb.index, (const char *)new_name)) {
		RETURN_FALSE;
	}
	RETURN_TRUE;
}
/* }}} */

/* {{{ proto bool ZipArchive::unchangeIndex(int index)
Changes to the file at position index are reverted */
static ZIPARCHIVE_METHOD(unchangeIndex)
{
	struct zip *intern;
	zval *this = getThis();
	long index;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &index) == FAILURE) {
		return;
	}

	if (index < 0) {
		RETURN_FALSE;
	}

	if (zip_unchange(intern, index) != 0) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::unchangeName(string name)
Changes to the file named 'name' are reverted */
static ZIPARCHIVE_METHOD(unchangeName)
{
	struct zip *intern;
	zval *this = getThis();
	struct zip_stat sb;
	char *name;
	int name_len;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name, &name_len) == FAILURE) {
		return;
	}

	if (name_len < 1) {
		RETURN_FALSE;
	}

	PHP_ZIP_STAT_PATH(intern, name, name_len, 0, sb);

	if (zip_unchange(intern, sb.index) != 0) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::unchangeAll()
All changes to files and global information in archive are reverted
*/
static ZIPARCHIVE_METHOD(unchangeAll)
{
	struct zip *intern;
	zval *this = getThis();

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zip_unchange_all(intern) != 0) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::unchangeArchive()
Revert all global changes to the archive archive.  For now, this only reverts archive comment changes. */
static ZIPARCHIVE_METHOD(unchangeArchive)
{
	struct zip *intern;
	zval *this = getThis();

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zip_unchange_archive(intern) != 0) {
		RETURN_FALSE;
	} else {
		RETURN_TRUE;
	}
}
/* }}} */

/* {{{ proto bool ZipArchive::extractTo(string pathto[, mixed files])
Extract one or more file from a zip archive */
/* TODO:
 * - allow index or array of indeces
 * - replace path
 * - patterns
 */
static ZIPARCHIVE_METHOD(extractTo)
{
	struct zip *intern;

	zval *this = getThis();
	zval *zval_files = NULL;
	zval **zval_file = NULL;
	php_stream_statbuf ssb;
	char *pathto;
	int pathto_len;
	int ret, i;

	int nelems;

	if (!this) {
		RETURN_FALSE;
	}

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|z", &pathto, &pathto_len, &zval_files) == FAILURE) {
		return;
	}

	if (pathto_len < 1) {
		RETURN_FALSE;
	}

	/* create the destination directory (recursively) if it does not exist */
	if (php_stream_stat_path_ex(pathto, PHP_STREAM_URL_STAT_QUIET, &ssb, NULL) < 0) {
		ret = php_stream_mkdir(pathto, 0777, PHP_STREAM_MKDIR_RECURSIVE, NULL);
		if (!ret) {
			RETURN_FALSE;
		}
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zval_files && (Z_TYPE_P(zval_files) != IS_NULL)) {
		switch (Z_TYPE_P(zval_files)) {
			case IS_STRING:
				if (!php_zip_extract_file(intern, pathto, Z_STRVAL_P(zval_files), Z_STRLEN_P(zval_files) TSRMLS_CC)) {
					RETURN_FALSE;
				}
				break;
			case IS_ARRAY:
				nelems = zend_hash_num_elements(Z_ARRVAL_P(zval_files));
				if (nelems == 0 ) {
					RETURN_FALSE;
				}
				for (i = 0; i < nelems; i++) {
					if (zend_hash_index_find(Z_ARRVAL_P(zval_files), i, (void **) &zval_file) == SUCCESS) {
						switch (Z_TYPE_PP(zval_file)) {
							case IS_LONG:
								break;
							case IS_STRING:
								if (!php_zip_extract_file(intern, pathto,
Z_STRVAL_PP(zval_file), Z_STRLEN_PP(zval_file) TSRMLS_CC)) { RETURN_FALSE; } break; } } } break; case IS_LONG: default: php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid argument, expect string or array of strings"); break; } } else { /* Extract all files */ int filecount = zip_get_num_files(intern); if (filecount == -1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Illegal archive"); RETURN_FALSE; } for (i = 0; i < filecount; i++) { char *file = (char*)zip_get_name(intern, i, ZIP_FL_UNCHANGED); if (!php_zip_extract_file(intern, pathto, file, strlen(file) TSRMLS_CC)) { RETURN_FALSE; } } } RETURN_TRUE; } /* }}} */ static void php_zip_get_from(INTERNAL_FUNCTION_PARAMETERS, int type) /* {{{ */ { struct zip *intern; zval *this = getThis(); struct zip_stat sb; struct zip_file *zf; char *filename; int filename_len; long index = -1; long flags = 0; long len = 0; char *buffer; int n = 0; if (!this) { RETURN_FALSE; } ZIP_FROM_OBJECT(intern, this); if (type == 1) { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|ll", &filename, &filename_len, &len, &flags) == FAILURE) { return; } PHP_ZIP_STAT_PATH(intern, filename, filename_len, flags, sb); } else { if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|ll", &index, &len, &flags) == FAILURE) { return; } PHP_ZIP_STAT_INDEX(intern, index, 0, sb); } if (sb.size < 1) { RETURN_EMPTY_STRING(); } if (len < 1) { len = sb.size; } if (index >= 0) { zf = zip_fopen_index(intern, index, flags); } else { zf = zip_fopen(intern, filename, flags); } if (zf == NULL) { RETURN_FALSE; } buffer = safe_emalloc(len, 1, 2); n = zip_fread(zf, buffer, len); if (n < 1) { efree(buffer); RETURN_EMPTY_STRING(); } zip_fclose(zf); buffer[n] = 0; RETURN_STRINGL(buffer, n, 0); } /* }}} */ /* {{{ proto string ZipArchive::getFromName(string entryname[, int len [, int flags]]) get the contents of an entry using its name */ static ZIPARCHIVE_METHOD(getFromName) { php_zip_get_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, 1); } /* }}} */ /* {{{ proto string 
ZipArchive::getFromIndex(int index[, int len [, int flags]])
get the contents of an entry using its index */
static ZIPARCHIVE_METHOD(getFromIndex)
{
	php_zip_get_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */

/* {{{ proto resource ZipArchive::getStream(string entryname)
get a stream for an entry using its name */
static ZIPARCHIVE_METHOD(getStream)
{
	struct zip *intern;
	zval *this = getThis();
	struct zip_stat sb;
	char *filename;
	int filename_len;
	char *mode = "rb";
	php_stream *stream;
	ze_zip_object *obj;

	if (!this) {
		RETURN_FALSE;
	}

	ZIP_FROM_OBJECT(intern, this);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &filename, &filename_len) == FAILURE) {
		return;
	}

	if (zip_stat(intern, filename, 0, &sb) != 0) {
		RETURN_FALSE;
	}

	obj = (ze_zip_object*) zend_object_store_get_object(this TSRMLS_CC);

	/* the stream reopens the archive by path, independent of this handle */
	stream = php_stream_zip_open(obj->filename, filename, mode STREAMS_CC TSRMLS_CC);
	if (stream) {
		php_stream_to_zval(stream, return_value);
	}
}
/* }}} */

/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_open, 0, 0, 1)
	ZEND_ARG_INFO(0, filename)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_ziparchive__void, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addemptydir, 0, 0, 1)
	ZEND_ARG_INFO(0, dirname)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addglob, 0, 0, 1)
	ZEND_ARG_INFO(0, pattern)
	ZEND_ARG_INFO(0, flags)
	ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addpattern, 0, 0, 1)
	ZEND_ARG_INFO(0, pattern)
	ZEND_ARG_INFO(0, path)
	ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addfile, 0, 0, 1)
	ZEND_ARG_INFO(0, filepath)
	ZEND_ARG_INFO(0, entryname)
	ZEND_ARG_INFO(0, start)
	ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_addfromstring, 0, 0, 2)
	ZEND_ARG_INFO(0, name)
	ZEND_ARG_INFO(0, content)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_statname, 0, 0, 1)
	ZEND_ARG_INFO(0, filename)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_statindex, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setarchivecomment, 0, 0, 1)
	ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcommentindex, 0, 0, 2)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getcommentname, 0, 0, 1)
	ZEND_ARG_INFO(0, name)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getcommentindex, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_renameindex, 0, 0, 2)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, new_name)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_renamename, 0, 0, 2)
	ZEND_ARG_INFO(0, name)
	ZEND_ARG_INFO(0, new_name)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_unchangeindex, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_unchangename, 0, 0, 1)
	ZEND_ARG_INFO(0, name)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_extractto, 0, 0, 1)
	ZEND_ARG_INFO(0, pathto)
	ZEND_ARG_INFO(0, files)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getfromname, 0, 0, 1)
	ZEND_ARG_INFO(0, entryname)
	ZEND_ARG_INFO(0, len)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getfromindex, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, len)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getarchivecomment, 0, 0, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_setcommentname, 0, 0, 2)
	ZEND_ARG_INFO(0, name)
	ZEND_ARG_INFO(0, comment)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_ziparchive_getstream, 0, 0, 1)
	ZEND_ARG_INFO(0, entryname)
ZEND_END_ARG_INFO() /* }}} */ /* {{{ ze_zip_object_class_functions */ static const zend_function_entry zip_class_functions[] = { ZIPARCHIVE_ME(open, arginfo_ziparchive_open, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(close, arginfo_ziparchive__void, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getStatusString, arginfo_ziparchive__void, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(addEmptyDir, arginfo_ziparchive_addemptydir, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(addFromString, arginfo_ziparchive_addfromstring, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(addFile, arginfo_ziparchive_addfile, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(addGlob, arginfo_ziparchive_addglob, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(addPattern, arginfo_ziparchive_addpattern, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(renameIndex, arginfo_ziparchive_renameindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(renameName, arginfo_ziparchive_renamename, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(setArchiveComment, arginfo_ziparchive_setarchivecomment, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getArchiveComment, arginfo_ziparchive_getarchivecomment, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(setCommentIndex, arginfo_ziparchive_setcommentindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(setCommentName, arginfo_ziparchive_setcommentname, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getCommentIndex, arginfo_ziparchive_getcommentindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getCommentName, arginfo_ziparchive_getcommentname, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(deleteIndex, arginfo_ziparchive_unchangeindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(deleteName, arginfo_ziparchive_unchangename, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(statName, arginfo_ziparchive_statname, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(statIndex, arginfo_ziparchive_statindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(locateName, arginfo_ziparchive_statname, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getNameIndex, arginfo_ziparchive_statindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(unchangeArchive, arginfo_ziparchive__void, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(unchangeAll, arginfo_ziparchive__void, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(unchangeIndex, 
arginfo_ziparchive_unchangeindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(unchangeName, arginfo_ziparchive_unchangename, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(extractTo, arginfo_ziparchive_extractto, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getFromName, arginfo_ziparchive_getfromname, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getFromIndex, arginfo_ziparchive_getfromindex, ZEND_ACC_PUBLIC) ZIPARCHIVE_ME(getStream, arginfo_ziparchive_getstream, ZEND_ACC_PUBLIC) {NULL, NULL, NULL} }; /* }}} */ #endif /* {{{ PHP_MINIT_FUNCTION */ static PHP_MINIT_FUNCTION(zip) { #ifdef PHP_ZIP_USE_OO zend_class_entry ce; memcpy(&zip_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); zip_object_handlers.clone_obj = NULL; zip_object_handlers.get_property_ptr_ptr = php_zip_get_property_ptr_ptr; zip_object_handlers.get_gc = php_zip_get_gc; zip_object_handlers.get_properties = php_zip_get_properties; zip_object_handlers.read_property = php_zip_read_property; zip_object_handlers.has_property = php_zip_has_property; INIT_CLASS_ENTRY(ce, "ZipArchive", zip_class_functions); ce.create_object = php_zip_object_new; zip_class_entry = zend_register_internal_class(&ce TSRMLS_CC); zend_hash_init(&zip_prop_handlers, 0, NULL, NULL, 1); php_zip_register_prop_handler(&zip_prop_handlers, "status", php_zip_status, NULL, NULL, IS_LONG TSRMLS_CC); php_zip_register_prop_handler(&zip_prop_handlers, "statusSys", php_zip_status_sys, NULL, NULL, IS_LONG TSRMLS_CC); php_zip_register_prop_handler(&zip_prop_handlers, "numFiles", php_zip_get_num_files, NULL, NULL, IS_LONG TSRMLS_CC); php_zip_register_prop_handler(&zip_prop_handlers, "filename", NULL, NULL, php_zipobj_get_filename, IS_STRING TSRMLS_CC); php_zip_register_prop_handler(&zip_prop_handlers, "comment", NULL, php_zipobj_get_zip_comment, NULL, IS_STRING TSRMLS_CC); REGISTER_ZIP_CLASS_CONST_LONG("CREATE", ZIP_CREATE); REGISTER_ZIP_CLASS_CONST_LONG("EXCL", ZIP_EXCL); REGISTER_ZIP_CLASS_CONST_LONG("CHECKCONS", ZIP_CHECKCONS); REGISTER_ZIP_CLASS_CONST_LONG("OVERWRITE", 
ZIP_OVERWRITE); REGISTER_ZIP_CLASS_CONST_LONG("FL_NOCASE", ZIP_FL_NOCASE); REGISTER_ZIP_CLASS_CONST_LONG("FL_NODIR", ZIP_FL_NODIR); REGISTER_ZIP_CLASS_CONST_LONG("FL_COMPRESSED", ZIP_FL_COMPRESSED); REGISTER_ZIP_CLASS_CONST_LONG("FL_UNCHANGED", ZIP_FL_UNCHANGED); REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFAULT", ZIP_CM_DEFAULT); REGISTER_ZIP_CLASS_CONST_LONG("CM_STORE", ZIP_CM_STORE); REGISTER_ZIP_CLASS_CONST_LONG("CM_SHRINK", ZIP_CM_SHRINK); REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_1", ZIP_CM_REDUCE_1); REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_2", ZIP_CM_REDUCE_2); REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_3", ZIP_CM_REDUCE_3); REGISTER_ZIP_CLASS_CONST_LONG("CM_REDUCE_4", ZIP_CM_REDUCE_4); REGISTER_ZIP_CLASS_CONST_LONG("CM_IMPLODE", ZIP_CM_IMPLODE); REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE", ZIP_CM_DEFLATE); REGISTER_ZIP_CLASS_CONST_LONG("CM_DEFLATE64", ZIP_CM_DEFLATE64); REGISTER_ZIP_CLASS_CONST_LONG("CM_PKWARE_IMPLODE", ZIP_CM_PKWARE_IMPLODE); REGISTER_ZIP_CLASS_CONST_LONG("CM_BZIP2", ZIP_CM_BZIP2); REGISTER_ZIP_CLASS_CONST_LONG("CM_LZMA", ZIP_CM_LZMA); REGISTER_ZIP_CLASS_CONST_LONG("CM_TERSE", ZIP_CM_TERSE); REGISTER_ZIP_CLASS_CONST_LONG("CM_LZ77", ZIP_CM_LZ77); REGISTER_ZIP_CLASS_CONST_LONG("CM_WAVPACK", ZIP_CM_WAVPACK); REGISTER_ZIP_CLASS_CONST_LONG("CM_PPMD", ZIP_CM_PPMD); /* Error code */ REGISTER_ZIP_CLASS_CONST_LONG("ER_OK", ZIP_ER_OK); /* N No error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_MULTIDISK", ZIP_ER_MULTIDISK); /* N Multi-disk zip archives not supported */ REGISTER_ZIP_CLASS_CONST_LONG("ER_RENAME", ZIP_ER_RENAME); /* S Renaming temporary file failed */ REGISTER_ZIP_CLASS_CONST_LONG("ER_CLOSE", ZIP_ER_CLOSE); /* S Closing zip archive failed */ REGISTER_ZIP_CLASS_CONST_LONG("ER_SEEK", ZIP_ER_SEEK); /* S Seek error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_READ", ZIP_ER_READ); /* S Read error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_WRITE", ZIP_ER_WRITE); /* S Write error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_CRC", ZIP_ER_CRC); /* N CRC error */ 
REGISTER_ZIP_CLASS_CONST_LONG("ER_ZIPCLOSED", ZIP_ER_ZIPCLOSED); /* N Containing zip archive was closed */ REGISTER_ZIP_CLASS_CONST_LONG("ER_NOENT", ZIP_ER_NOENT); /* N No such file */ REGISTER_ZIP_CLASS_CONST_LONG("ER_EXISTS", ZIP_ER_EXISTS); /* N File already exists */ REGISTER_ZIP_CLASS_CONST_LONG("ER_OPEN", ZIP_ER_OPEN); /* S Can't open file */ REGISTER_ZIP_CLASS_CONST_LONG("ER_TMPOPEN", ZIP_ER_TMPOPEN); /* S Failure to create temporary file */ REGISTER_ZIP_CLASS_CONST_LONG("ER_ZLIB", ZIP_ER_ZLIB); /* Z Zlib error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_MEMORY", ZIP_ER_MEMORY); /* N Malloc failure */ REGISTER_ZIP_CLASS_CONST_LONG("ER_CHANGED", ZIP_ER_CHANGED); /* N Entry has been changed */ REGISTER_ZIP_CLASS_CONST_LONG("ER_COMPNOTSUPP", ZIP_ER_COMPNOTSUPP);/* N Compression method not supported */ REGISTER_ZIP_CLASS_CONST_LONG("ER_EOF", ZIP_ER_EOF); /* N Premature EOF */ REGISTER_ZIP_CLASS_CONST_LONG("ER_INVAL", ZIP_ER_INVAL); /* N Invalid argument */ REGISTER_ZIP_CLASS_CONST_LONG("ER_NOZIP", ZIP_ER_NOZIP); /* N Not a zip archive */ REGISTER_ZIP_CLASS_CONST_LONG("ER_INTERNAL", ZIP_ER_INTERNAL); /* N Internal error */ REGISTER_ZIP_CLASS_CONST_LONG("ER_INCONS", ZIP_ER_INCONS); /* N Zip archive inconsistent */ REGISTER_ZIP_CLASS_CONST_LONG("ER_REMOVE", ZIP_ER_REMOVE); /* S Can't remove file */ REGISTER_ZIP_CLASS_CONST_LONG("ER_DELETED", ZIP_ER_DELETED); /* N Entry has been deleted */ php_register_url_stream_wrapper("zip", &php_stream_zip_wrapper TSRMLS_CC); #endif le_zip_dir = zend_register_list_destructors_ex(php_zip_free_dir, NULL, le_zip_dir_name, module_number); le_zip_entry = zend_register_list_destructors_ex(php_zip_free_entry, NULL, le_zip_entry_name, module_number); return SUCCESS; } /* }}} */ /* {{{ PHP_MSHUTDOWN_FUNCTION */ static PHP_MSHUTDOWN_FUNCTION(zip) { #ifdef PHP_ZIP_USE_OO zend_hash_destroy(&zip_prop_handlers); php_unregister_url_stream_wrapper("zip" TSRMLS_CC); #endif return SUCCESS; } /* }}} */ /* {{{ PHP_MINFO_FUNCTION */ static 
PHP_MINFO_FUNCTION(zip) { php_info_print_table_start(); php_info_print_table_row(2, "Zip", "enabled"); php_info_print_table_row(2, "Extension Version","$Id$"); php_info_print_table_row(2, "Zip version", PHP_ZIP_VERSION_STRING); php_info_print_table_row(2, "Libzip version", LIBZIP_VERSION); php_info_print_table_end(); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
/* NOTE(review): the two lines below are corpus/extraction artifacts
 * (dataset file paths), not source code, and mark the seam between two
 * unrelated concatenated files (PHP ext/zip above, Lynx HTML.c below).
 * Preserved here as a comment; they should be removed from any real build:
 *   ./CrossVul/dataset_final_sorted/CWE-416/c/good_5173_1
 *   crossvul-cpp_data_good_2515_2
 */
/* * $LynxId: HTML.c,v 1.174 2017/07/05 22:48:09 tom Exp $ * * Structured stream to Rich hypertext converter * ============================================ * * This generates a hypertext object. It converts from the * structured stream interface of HTML events into the style- * oriented interface of the HText.h interface. This module is * only used in clients and should not be linked into servers. * * Override this module if making a new GUI browser. * * Being Overidden * */ #define HTSTREAM_INTERNAL 1 #include <HTUtils.h> #define Lynx_HTML_Handler #include <HTChunk.h> #include <HText.h> #include <HTStyle.h> #include <HTML.h> #include <HTCJK.h> #include <HTAtom.h> #include <HTAnchor.h> #include <HTMLGen.h> #include <HTParse.h> #include <HTList.h> #include <UCMap.h> #include <UCDefs.h> #include <UCAux.h> #include <LYGlobalDefs.h> #include <LYCharUtils.h> #include <LYCharSets.h> #include <HTAlert.h> #include <HTForms.h> #include <HTNestedList.h> #include <GridText.h> #include <LYStrings.h> #include <LYUtils.h> #include <LYMap.h> #include <LYList.h> #include <LYBookmark.h> #include <LYHistory.h> #ifdef VMS #include <LYCurses.h> #endif /* VMS */ #ifdef USE_PRETTYSRC #include <LYPrettySrc.h> #endif #ifdef USE_COLOR_STYLE #include <SGML.h> #include <AttrList.h> #include <LYHash.h> #include <LYStyle.h> #undef SELECTED_STYLES #define pHText_changeStyle(X,Y,Z) {} #if OMIT_SCN_KEEPING # define HCODE_TO_STACK_OFF(x) /*(CSHASHSIZE+1)*/ 88888 /*special value. 
*/ #else # define HCODE_TO_STACK_OFF(x) x /*pass computed value */ #endif #endif /* USE_COLOR_STYLE */ #ifdef USE_SOURCE_CACHE #include <HTAccess.h> #endif #include <LYCurses.h> #include <LYJustify.h> #include <LYexit.h> #include <LYLeaks.h> #define STACKLEVEL(me) ((me->stack + MAX_NESTING - 1) - me->sp) #define DFT_TEXTAREA_COLS 60 #define DFT_TEXTAREA_ROWS 4 #define MAX_TEXTAREA_COLS LYcolLimit #define MAX_TEXTAREA_ROWS (3 * LYlines) #define LimitValue(name, value) \ if (name > value) { \ CTRACE((tfp, "Limited " #name " to %d, was %d\n", \ value, name)); \ name = value; \ } struct _HTStream { const HTStreamClass *isa; #ifdef USE_SOURCE_CACHE HTParentAnchor *anchor; FILE *fp; char *filename; HTChunk *chunk; HTChunk *last_chunk; /* the last chunk in a chain! */ const HTStreamClass *actions; HTStream *target; int status; #else /* .... */ #endif }; static HTStyleSheet *styleSheet = NULL; /* Application-wide */ /* Module-wide style cache */ static HTStyle *styles[HTML_ELEMENTS + LYNX_HTML_EXTRA_ELEMENTS]; /* adding 24 nested list styles */ /* and 3 header alignment styles */ /* and 3 div alignment styles */ static HTStyle *default_style = NULL; const char *LYToolbarName = "LynxPseudoToolbar"; /* used to turn off a style if the HTML author forgot to static int i_prior_style = -1; */ /* * Private function.... */ static int HTML_end_element(HTStructured * me, int element_number, char **include); static int HTML_start_element(HTStructured * me, int element_number, const BOOL *present, STRING2PTR value, int tag_charset, char **include); /* * If we have verbose_img set, display labels for images. */ #define VERBOSE_IMG(value,src_type,string) \ ((verbose_img) ? 
(newtitle = MakeNewTitle(value,src_type)): string) static char *MakeNewTitle(STRING2PTR value, int src_type); static char *MakeNewImageValue(STRING2PTR value); static char *MakeNewMapValue(STRING2PTR value, const char *mapstr); /* Set an internal flag that the next call to a stack-affecting method * is only internal and the stack manipulation should be skipped. - kw */ #define SET_SKIP_STACK(el_num) if (HTML_dtd.tags[el_num].contents != SGML_EMPTY) \ { me->skip_stack++; } void strtolower(char *i) { if (!i) return; while (*i) { *i = (char) TOLOWER(*i); i++; } } /* Flattening the style structure * ------------------------------ * * On the NeXT, and on any read-only browser, it is simpler for the text to * have a sequence of styles, rather than a nested tree of styles. In this * case we have to flatten the structure as it arrives from SGML tags into a * sequence of styles. */ /* * If style really needs to be set, call this. */ void actually_set_style(HTStructured * me) { if (!me->text) { /* First time through */ LYGetChartransInfo(me); UCSetTransParams(&me->T, me->UCLYhndl, me->UCI, HTAnchor_getUCLYhndl(me->node_anchor, UCT_STAGE_HTEXT), HTAnchor_getUCInfoStage(me->node_anchor, UCT_STAGE_HTEXT)); me->text = HText_new2(me->node_anchor, me->target); HText_beginAppend(me->text); HText_setStyle(me->text, me->new_style); me->in_word = NO; LYCheckForContentBase(me); } else { HText_setStyle(me->text, me->new_style); } me->old_style = me->new_style; me->style_change = NO; } /* * If you THINK you need to change style, call this. */ static void change_paragraph_style(HTStructured * me, HTStyle *style) { if (me->new_style != style) { me->style_change = YES; me->new_style = style; } me->in_word = NO; } /* * Return true if we should write a message (to LYNXMESSAGES, or the trace * file) telling about some bad HTML that we've found. 
*/ BOOL LYBadHTML(HTStructured * me) { BOOL code = FALSE; switch ((enumBadHtml) cfg_bad_html) { case BAD_HTML_IGNORE: break; case BAD_HTML_TRACE: code = TRUE; break; case BAD_HTML_MESSAGE: code = TRUE; break; case BAD_HTML_WARN: /* * If we're already tracing, do not add a warning. */ if (!TRACE && !me->inBadHTML) { HTUserMsg(BAD_HTML_USE_TRACE); me->inBadHTML = TRUE; } code = TRACE; break; } return code; } /* * Handle the formatted message. */ void LYShowBadHTML(const char *message) { if (dump_output_immediately && dump_to_stderr) fprintf(stderr, "%s", message); switch ((enumBadHtml) cfg_bad_html) { case BAD_HTML_IGNORE: break; case BAD_HTML_TRACE: case BAD_HTML_MESSAGE: case BAD_HTML_WARN: CTRACE((tfp, "%s", message)); break; } switch ((enumBadHtml) cfg_bad_html) { case BAD_HTML_IGNORE: case BAD_HTML_TRACE: case BAD_HTML_WARN: break; case BAD_HTML_MESSAGE: LYstore_message(message); break; } } /*_________________________________________________________________________ * * A C T I O N R O U T I N E S */ /* FIXME: this should be amended to do the substitution only when not in a * multibyte stream. */ #ifdef EXP_JAPANESE_SPACES #define FIX_JAPANESE_SPACES \ (HTCJK == CHINESE || HTCJK == JAPANESE || HTCJK == TAIPEI) /* don't replace '\n' with ' ' if Chinese or Japanese - HN */ #else #define FIX_JAPANESE_SPACES 0 #endif /* Character handling * ------------------ */ void HTML_put_character(HTStructured * me, int c) { unsigned uc = UCH(c); /* * Ignore all non-MAP content when just scanning a document for MAPs. - FM */ if (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT) return; c = (int) uc; /* * Do EOL conversion if needed. - FM * * Convert EOL styles: * macintosh: cr --> lf * ascii: cr-lf --> lf * unix: lf --> lf */ if ((me->lastraw == '\r') && c == '\n') { me->lastraw = -1; return; } me->lastraw = c; if (c == '\r') { c = '\n'; uc = UCH(c); } /* * Handle SGML_LITTERAL tags that have HTChunk elements. 
- FM */ switch (me->sp[0].tag_number) { case HTML_COMMENT: return; /* Do Nothing */ case HTML_TITLE: if (c == LY_SOFT_HYPHEN) return; if (c != '\n' && c != '\t' && c != '\r') { HTChunkPutc(&me->title, uc); } else if (FIX_JAPANESE_SPACES) { if (c == '\t') { HTChunkPutc(&me->title, ' '); } else { return; } } else { HTChunkPutc(&me->title, ' '); } return; case HTML_STYLE: HTChunkPutc(&me->style_block, uc); return; case HTML_SCRIPT: HTChunkPutc(&me->script, uc); return; case HTML_OBJECT: HTChunkPutc(&me->object, uc); return; case HTML_TEXTAREA: HTChunkPutc(&me->textarea, uc); return; case HTML_SELECT: case HTML_OPTION: HTChunkPutc(&me->option, uc); return; case HTML_MATH: HTChunkPutc(&me->math, uc); return; default: if (me->inSELECT) { /* * If we are within a SELECT not caught by the cases above - * HTML_SELECT or HTML_OPTION may not be the last element pushed on * the style stack if there were invalid markup tags within a * SELECT element. For error recovery, treat text as part of the * OPTION text, it is probably meant to show up as user-visible * text. Having A as an open element while in SELECT is really * sick, don't make anchor text part of the option text in that * case since the option text will probably just be discarded. - * kw */ if (me->sp[0].tag_number == HTML_A) break; HTChunkPutc(&me->option, uc); return; } break; } /* end first switch */ /* * Handle all other tag content. - FM */ switch (me->sp[0].tag_number) { case HTML_PRE: /* Formatted text */ /* * We guarantee that the style is up-to-date in begin_litteral. But we * still want to strip \r's. */ if (c != '\r' && !(c == '\n' && me->inLABEL && !me->inP) && !(c == '\n' && !me->inPRE)) { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, c); } me->inPRE = TRUE; break; case HTML_LISTING: /* Literal text */ case HTML_XMP: case HTML_PLAINTEXT: /* * We guarantee that the style is up-to-date in begin_litteral. But we * still want to strip \r's. 
*/ if (c != '\r') { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, c); } break; default: /* * Free format text. */ if (me->sp->style->id == ST_Preformatted) { if (c != '\r' && !(c == '\n' && me->inLABEL && !me->inP) && !(c == '\n' && !me->inPRE)) { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, c); } me->inPRE = TRUE; } else if (me->sp->style->id == ST_Listing || me->sp->style->id == ST_Example) { if (c != '\r') { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, c); } } else { if (me->style_change) { if ((c == '\n') || (c == ' ')) return; /* Ignore it */ UPDATE_STYLE; } if (c == '\n') { if (!FIX_JAPANESE_SPACES) { if (me->in_word) { if (HText_getLastChar(me->text) != ' ') { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, ' '); } me->in_word = NO; } } } else if (c == ' ' || c == '\t') { if (HText_getLastChar(me->text) != ' ') { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, ' '); } } else if (c == '\r') { /* ignore */ } else { me->inP = TRUE; me->inLABEL = FALSE; HText_appendCharacter(me->text, c); me->in_word = YES; } } } /* end second switch */ if (c == '\n' || c == '\t') { HText_setLastChar(me->text, ' '); /* set it to a generic separator */ } else { HText_setLastChar(me->text, c); } } /* String handling * --------------- * * This is written separately from put_character because the loop can * in some cases be promoted to a higher function call level for speed. 
*/ void HTML_put_string(HTStructured * me, const char *s) { HTChunk *target = NULL; #ifdef USE_PRETTYSRC char *translated_string = NULL; #endif if (s == NULL || (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT)) return; #ifdef USE_PRETTYSRC if (psrc_convert_string) { StrAllocCopy(translated_string, s); TRANSLATE_AND_UNESCAPE_ENTITIES(&translated_string, TRUE, FALSE); s = (const char *) translated_string; } #endif switch (me->sp[0].tag_number) { case HTML_COMMENT: break; /* Do Nothing */ case HTML_TITLE: target = &me->title; break; case HTML_STYLE: target = &me->style_block; break; case HTML_SCRIPT: target = &me->script; break; case HTML_PRE: /* Formatted text */ case HTML_LISTING: /* Literal text */ case HTML_XMP: case HTML_PLAINTEXT: /* * We guarantee that the style is up-to-date in begin_litteral */ HText_appendText(me->text, s); break; case HTML_OBJECT: target = &me->object; break; case HTML_TEXTAREA: target = &me->textarea; break; case HTML_SELECT: case HTML_OPTION: target = &me->option; break; case HTML_MATH: target = &me->math; break; default: /* Free format text? */ if (!me->sp->style->freeFormat) { /* * If we are within a preformatted text style not caught by the * cases above (HTML_PRE or similar may not be the last element * pushed on the style stack). 
- kw */ #ifdef USE_PRETTYSRC if (psrc_view) { /* * We do this so that a raw '\r' in the string will not be * interpreted as an internal request to break a line - passing * '\r' to HText_appendText is treated by it as a request to * insert a blank line - VH */ for (; *s; ++s) HTML_put_character(me, *s); } else #endif HText_appendText(me->text, s); break; } else { const char *p = s; char c; if (me->style_change) { for (; *p && ((*p == '\n') || (*p == '\r') || (*p == ' ') || (*p == '\t')); p++) ; /* Ignore leaders */ if (!*p) break; UPDATE_STYLE; } for (; *p; p++) { if (*p == 13 && p[1] != 10) { /* * Treat any '\r' which is not followed by '\n' as '\n', to * account for macintosh lineend in ALT attributes etc. - * kw */ c = '\n'; } else { c = *p; } if (me->style_change) { if ((c == '\n') || (c == ' ') || (c == '\t')) continue; /* Ignore it */ UPDATE_STYLE; } if (c == '\n') { if (!FIX_JAPANESE_SPACES) { if (me->in_word) { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); me->in_word = NO; } } } else if (c == ' ' || c == '\t') { if (HText_getLastChar(me->text) != ' ') HText_appendCharacter(me->text, ' '); } else if (c == '\r') { /* ignore */ } else { HText_appendCharacter(me->text, c); me->in_word = YES; } /* set the Last Character */ if (c == '\n' || c == '\t') { /* set it to a generic separator */ HText_setLastChar(me->text, ' '); } else if (c == '\r' && HText_getLastChar(me->text) == ' ') { /* * \r's are ignored. In order to keep collapsing spaces * correctly, we must default back to the previous * separator, if there was one. So we set LastChar to a * generic separator. 
*/ HText_setLastChar(me->text, ' '); } else { HText_setLastChar(me->text, c); } } /* for */ } } /* end switch */ if (target != NULL) { if (target->data == s) { CTRACE((tfp, "BUG: appending chunk to itself: `%.*s'\n", target->size, target->data)); } else { HTChunkPuts(target, s); } } #ifdef USE_PRETTYSRC if (psrc_convert_string) { psrc_convert_string = FALSE; FREE(translated_string); } #endif } /* Buffer write * ------------ */ void HTML_write(HTStructured * me, const char *s, int l) { const char *p; const char *e = s + l; if (LYMapsOnly && me->sp[0].tag_number != HTML_OBJECT) return; for (p = s; p < e; p++) HTML_put_character(me, *p); } /* * "Internal links" are hyperlinks whose source and destination are * within the same document, and for which the destination is given * as a URL Reference with an empty URL, but possibly with a non-empty * #fragment. (This terminology re URL-Reference vs. URL follows the * Fielding URL syntax and semantics drafts). * Differences: * (1) The document's base (in whatever way it is given) is not used for * resolving internal link references. * (2) Activating an internal link should not result in a new retrieval * of a copy of the document. * (3) Internal links are the only way to refer with a hyperlink to a document * (or a location in it) which is only known as the result of a POST * request (doesn't have a URL from which the document can be retrieved * with GET), and can only be used from within that document. * * *If track_internal_links is true, we keep track of whether a * link destination was given as an internal link. This information is * recorded in the type of the link between anchor objects, and is available * to the HText object and the mainloop from there. URL References to * internal destinations are still resolved into an absolute form before * being passed on, but using the current stream's retrieval address instead * of the base URL. * Examples: (replace [...] 
to have a valid absolute URL) * In document retrieved from [...]/mypath/mydoc.htm w/ base [...]/otherpath/ * a. HREF="[...]/mypath/mydoc.htm" -> [...]/mypath/mydoc.htm * b. HREF="[...]/mypath/mydoc.htm#frag" -> [...]/mypath/mydoc.htm#frag * c. HREF="mydoc.htm" -> [...]/otherpath/mydoc.htm * d. HREF="mydoc.htm#frag" -> [...]/otherpath/mydoc.htm#frag * e. HREF="" -> [...]/mypath/mydoc.htm (marked internal) * f. HREF="#frag" -> [...]/mypath/mydoc.htm#frag (marked internal) * * *If track_internal_links is false, URL-less URL-References are * resolved differently from URL-References with a non-empty URL (using the * current stream's retrieval address instead of the base), but we make no * further distinction. Resolution is then as in the examples above, execept * that there is no "(marked internal)". * * *Note that this doesn't apply to form ACTIONs (always resolved using base, * never marked internal). Also other references encountered or generated * are not marked internal, whether they have a URL or not, if in a given * context an internal link makes no sense (e.g., IMG SRC=). */ /* A flag is used to keep track of whether an "URL reference" encountered had a real "URL" or not. In the latter case, it will be marked as "internal". The flag is set before we start messing around with the string (resolution of relative URLs etc.). This variable only used locally here, don't confuse with LYinternal_flag which is for overriding non-caching similar to LYoverride_no_cache. - kw */ #define CHECK_FOR_INTERN(flag,s) \ flag = (BOOLEAN) (((s) && (*(s)=='#' || *(s)=='\0')) ? TRUE : FALSE) /* Last argument to pass to HTAnchor_findChildAndLink() calls, just an abbreviation. - kw */ #define INTERN_CHK(flag) (HTLinkType *)((flag) ? 
HTInternalLink : NULL) #define INTERN_LT INTERN_CHK(intern_flag) #ifdef USE_COLOR_STYLE static char *Style_className = 0; static char *Style_className_end = 0; static size_t Style_className_len = 0; static int hcode; #ifdef LY_FIND_LEAKS static void free_Style_className(void) { FREE(Style_className); } #endif static void addClassName(const char *prefix, const char *actual, size_t length) { size_t offset = strlen(prefix); size_t have = (unsigned) (Style_className_end - Style_className); size_t need = (offset + length + 1); if ((have + need) >= Style_className_len) { Style_className_len += 1024 + 2 * (have + need); if (Style_className == 0) { Style_className = typeMallocn(char, Style_className_len); } else { Style_className = typeRealloc(char, Style_className, Style_className_len); } if (Style_className == NULL) outofmem(__FILE__, "addClassName"); Style_className_end = Style_className + have; } if (offset) strcpy(Style_className_end, prefix); if (length) memcpy(Style_className_end + offset, actual, length); Style_className_end[offset + length] = '\0'; strtolower(Style_className_end); Style_className_end += (offset + length); } #else #define addClassName(prefix, actual, length) /* nothing */ #endif #ifdef USE_PRETTYSRC static void HTMLSRC_apply_markup(HTStructured * context, HTlexeme lexeme, int start, int tag_charset) { HT_tagspec *ts = *((start ? lexeme_start : lexeme_end) + lexeme); while (ts) { #ifdef USE_COLOR_STYLE if (ts->start) { current_tag_style = ts->style; force_current_tag_style = TRUE; forced_classname = ts->class_name; force_classname = TRUE; } #endif CTRACE((tfp, ts->start ? 
"SRCSTART %d\n" : "SRCSTOP %d\n", (int) lexeme)); if (ts->start) HTML_start_element(context, (int) ts->element, ts->present, (STRING2PTR) ts->value, tag_charset, NULL); else HTML_end_element(context, (int) ts->element, NULL); ts = ts->next; } } # define START TRUE # define STOP FALSE # define PSRCSTART(x) HTMLSRC_apply_markup(me,HTL_##x,START,tag_charset) # define PSRCSTOP(x) HTMLSRC_apply_markup(me,HTL_##x,STOP,tag_charset) # define PUTC(x) HTML_put_character(me,x) # define PUTS(x) HTML_put_string(me,x) #endif /* USE_PRETTYSRC */ static void LYStartArea(HTStructured * obj, const char *href, const char *alt, const char *title, int tag_charset) { BOOL new_present[HTML_AREA_ATTRIBUTES]; const char *new_value[HTML_AREA_ATTRIBUTES]; int i; for (i = 0; i < HTML_AREA_ATTRIBUTES; i++) new_present[i] = NO; if (alt) { new_present[HTML_AREA_ALT] = YES; new_value[HTML_AREA_ALT] = (const char *) alt; } if (non_empty(title)) { new_present[HTML_AREA_TITLE] = YES; new_value[HTML_AREA_TITLE] = (const char *) title; } if (href) { new_present[HTML_AREA_HREF] = YES; new_value[HTML_AREA_HREF] = (const char *) href; } (*obj->isa->start_element) (obj, HTML_AREA, new_present, new_value, tag_charset, 0); } static void LYHandleFIG(HTStructured * me, const BOOL *present, STRING2PTR value, int isobject, int imagemap, const char *id, const char *src, int convert, int start, BOOL *intern_flag GCC_UNUSED) { if (start == TRUE) { me->inFIG = TRUE; if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, NULL); } if (!isobject) { LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); me->inFIGwithP = TRUE; } else { me->inFIGwithP = FALSE; HTML_put_character(me, ' '); /* space char may be ignored */ } if (non_empty(id)) { if (present && convert) { CHECK_ID(HTML_FIG_ID); } else LYHandleID(me, id); } me->in_word = NO; me->inP = FALSE; if (clickable_images && non_empty(src)) { char *href = NULL; StrAllocCopy(href, src); CHECK_FOR_INTERN(*intern_flag, href); LYLegitimizeHREF(me, &href, 
TRUE, TRUE); if (*href) { me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ INTERN_CHK(*intern_flag)); /* Type */ HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, (isobject ? (imagemap ? "(IMAGE)" : "(OBJECT)") : "[FIGURE]")); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, 0); HTML_put_character(me, '-'); HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } FREE(href); } } else { /* handle end tag */ if (me->inFIGwithP) { LYEnsureDoubleSpace(me); } else { HTML_put_character(me, ' '); /* space char may be ignored */ } LYResetParagraphAlignment(me); me->inFIGwithP = FALSE; me->inFIG = FALSE; change_paragraph_style(me, me->sp->style); /* Often won't really change */ if (me->List_Nesting_Level >= 0) { UPDATE_STYLE; HText_NegateLineOne(me->text); } } } static void clear_objectdata(HTStructured * me) { if (me) { HTChunkClear(&me->object); me->object_started = FALSE; me->object_declare = FALSE; me->object_shapes = FALSE; me->object_ismap = FALSE; FREE(me->object_usemap); FREE(me->object_id); FREE(me->object_title); FREE(me->object_data); FREE(me->object_type); FREE(me->object_classid); FREE(me->object_codebase); FREE(me->object_codetype); FREE(me->object_name); } } #define HTParseALL(pp,pconst) \ { char* free_me = *pp; \ *pp = HTParse(*pp, pconst, PARSE_ALL); \ FREE(free_me); \ } /* Start Element * ------------- */ static int HTML_start_element(HTStructured * me, int element_number, const BOOL *present, STRING2PTR value, int tag_charset, char **include) { char *alt_string = NULL; char *id_string = NULL; char *newtitle = NULL; char **pdoctitle = NULL; char *href = NULL; char *map_href = NULL; char *title = NULL; char *I_value = NULL; char *I_name = NULL; char *temp = NULL; const char *Base = NULL; int dest_char_set = -1; 
HTParentAnchor *dest = NULL; /* An anchor's destination */ BOOL dest_ismap = FALSE; /* Is dest an image map script? */ HTChildAnchor *ID_A = NULL; /* HTML_foo_ID anchor */ int url_type = 0, i = 0; char *cp = NULL; HTMLElement ElementNumber = (HTMLElement) element_number; BOOL intern_flag = FALSE; short stbl_align = HT_ALIGN_NONE; int status = HT_OK; #ifdef USE_COLOR_STYLE char *class_name; int class_used = 0; #endif #ifdef USE_PRETTYSRC if (psrc_view && !sgml_in_psrc_was_initialized) { if (!psrc_nested_call) { HTTag *tag = &HTML_dtd.tags[element_number]; char buf[200]; const char *p; if (psrc_first_tag) { psrc_first_tag = FALSE; /* perform the special actions on the begining of the document. It's assumed that all lynx modules start generating html from tag (ie not a text) so we are able to trap this moment and initialize. */ psrc_nested_call = TRUE; HTML_start_element(me, HTML_BODY, NULL, NULL, tag_charset, NULL); HTML_start_element(me, HTML_PRE, NULL, NULL, tag_charset, NULL); PSRCSTART(entire); psrc_nested_call = FALSE; } psrc_nested_call = TRUE; /*write markup for tags and exit */ PSRCSTART(abracket); PUTC('<'); PSRCSTOP(abracket); PSRCSTART(tag); if (tagname_transform != 0) PUTS(tag->name); else { LYStrNCpy(buf, tag->name, sizeof(buf) - 1); LYLowerCase(buf); PUTS(buf); } if (present) { for (i = 0; i < tag->number_of_attributes; i++) if (present[i]) { PUTC(' '); PSRCSTART(attrib); if (attrname_transform != 0) PUTS(tag->attributes[i].name); else { LYStrNCpy(buf, tag->attributes[i].name, sizeof(buf) - 1); LYLowerCase(buf); PUTS(buf); } if (value[i]) { char q = '"'; /*0 in dquotes, 1 - in quotes, 2 mixed */ char kind = (char) (!StrChr(value[i], '"') ? 0 : !StrChr(value[i], '\'') ? q = '\'', 1 : 2); PUTC('='); PSRCSTOP(attrib); PSRCSTART(attrval); PUTC(q); /*is it special ? 
*/ if (tag->attributes[i].type == HTMLA_ANAME) { HTStartAnchor(me, value[i], NULL); HTML_end_element(me, HTML_A, NULL); } else if (tag->attributes[i].type == HTMLA_HREF) { PSRCSTART(href); HTStartAnchor(me, NULL, value[i]); } if (kind != 2) PUTS(value[i]); else for (p = value[i]; *p; p++) if (*p != '"') PUTC(*p); else PUTS("&#34;"); /*is it special ? */ if (tag->attributes[i].type == HTMLA_HREF) { HTML_end_element(me, HTML_A, NULL); PSRCSTOP(href); } PUTC(q); PSRCSTOP(attrval); } /* if value */ } /* if present[i] */ } /* if present */ PSRCSTOP(tag); PSRCSTART(abracket); PUTC('>'); PSRCSTOP(abracket); psrc_nested_call = FALSE; return HT_OK; } /*if (!psrc_nested_call) */ /*fall through */ } #endif /* USE_PRETTYSRC */ if (LYMapsOnly) { if (!(ElementNumber == HTML_MAP || ElementNumber == HTML_AREA || ElementNumber == HTML_BASE || ElementNumber == HTML_OBJECT || ElementNumber == HTML_A)) { return HT_OK; } } else if (!me->text) { UPDATE_STYLE; } { /* me->tag_charset is charset for attribute values. */ int j = ((tag_charset < 0) ? me->UCLYhndl : tag_charset); if ((me->tag_charset != j) || (j < 0 /* for trace entry */ )) { CTRACE((tfp, "me->tag_charset: %d -> %d", me->tag_charset, j)); CTRACE((tfp, " (me->UCLYhndl: %d, tag_charset: %d)\n", me->UCLYhndl, tag_charset)); me->tag_charset = j; } } /* this should be done differently */ #if defined(USE_COLOR_STYLE) addClassName(";", HTML_dtd.tags[element_number].name, (size_t) HTML_dtd.tags[element_number].name_len); class_name = (force_classname ? forced_classname : class_string); force_classname = FALSE; if (force_current_tag_style == FALSE) { current_tag_style = (class_name[0] ? 
-1 : cached_tag_styles[element_number]); } else { force_current_tag_style = FALSE; } CTRACE2(TRACE_STYLE, (tfp, "CSS.elt:<%s>\n", HTML_dtd.tags[element_number].name)); if (current_tag_style == -1) { /* Append class_name */ hcode = hash_code_lowercase_on_fly(HTML_dtd.tags[element_number].name); if (class_name[0]) { int ohcode = hcode; hcode = hash_code_aggregate_char('.', hcode); hcode = hash_code_aggregate_lower_str(class_name, hcode); if (!hashStyles[hcode].name) { /* None such -> classless version */ hcode = ohcode; CTRACE2(TRACE_STYLE, (tfp, "STYLE.start_element: <%s> (class <%s> not configured), hcode=%d.\n", HTML_dtd.tags[element_number].name, class_name, hcode)); } else { addClassName(".", class_name, strlen(class_name)); CTRACE2(TRACE_STYLE, (tfp, "STYLE.start_element: <%s>.<%s>, hcode=%d.\n", HTML_dtd.tags[element_number].name, class_name, hcode)); class_used = 1; } } class_string[0] = '\0'; } else { /* (current_tag_style!=-1) */ if (class_name[0]) { addClassName(".", class_name, strlen(class_name)); class_string[0] = '\0'; } hcode = current_tag_style; CTRACE2(TRACE_STYLE, (tfp, "STYLE.start_element: <%s>, hcode=%d.\n", HTML_dtd.tags[element_number].name, hcode)); current_tag_style = -1; } #if !OMIT_SCN_KEEPING /* Can be done in other cases too... */ if (!class_used && ElementNumber == HTML_INPUT) { /* For some other too? 
*/ const char *type = ""; int ohcode = hcode; if (present && present[HTML_INPUT_TYPE] && value[HTML_INPUT_TYPE]) type = value[HTML_INPUT_TYPE]; hcode = hash_code_aggregate_lower_str(".type.", hcode); hcode = hash_code_aggregate_lower_str(type, hcode); if (!hashStyles[hcode].name) { /* None such -> classless version */ hcode = ohcode; CTRACE2(TRACE_STYLE, (tfp, "STYLE.start_element: type <%s> not configured.\n", type)); } else { addClassName(".type.", type, strlen(type)); CTRACE2(TRACE_STYLE, (tfp, "STYLE.start_element: <%s>.type.<%s>, hcode=%d.\n", HTML_dtd.tags[element_number].name, type, hcode)); } } #endif /* !OMIT_SCN_KEEPING */ HText_characterStyle(me->text, hcode, STACK_ON); #endif /* USE_COLOR_STYLE */ /* * Handle the start tag. - FM */ switch (ElementNumber) { case HTML_HTML: break; case HTML_HEAD: break; case HTML_BASE: if (present && present[HTML_BASE_HREF] && !local_host_only && non_empty(value[HTML_BASE_HREF])) { char *base = NULL; const char *related = NULL; StrAllocCopy(base, value[HTML_BASE_HREF]); CTRACE((tfp, "*HTML_BASE: initial href=`%s'\n", NonNull(base))); if (!(url_type = LYLegitimizeHREF(me, &base, TRUE, TRUE))) { CTRACE((tfp, "HTML: BASE '%s' is not an absolute URL.\n", NonNull(base))); if (me->inBadBASE == FALSE) HTAlert(BASE_NOT_ABSOLUTE); me->inBadBASE = TRUE; } if (url_type == LYNXIMGMAP_URL_TYPE) { /* * These have a non-standard form, basically strip the prefix * or the code below would insert a nonsense host into the * pseudo URL. These should never occur where they would be * used for resolution of relative URLs anyway. We can also * strip the #map part. - kw */ temp = base; base = HTParse(base + 11, "", PARSE_ALL_WITHOUT_ANCHOR); FREE(temp); } /* * Get parent's address for defaulted fields. */ related = me->node_anchor->address; /* * Create the access field. */ temp = HTParse(base, related, PARSE_ACCESS + PARSE_PUNCTUATION); StrAllocCopy(me->base_href, temp); FREE(temp); /* * Create the host[:port] field. 
*/ temp = HTParse(base, "", PARSE_HOST + PARSE_PUNCTUATION); if (!StrNCmp(temp, "//", 2)) { StrAllocCat(me->base_href, temp); if (!strcmp(me->base_href, "file://")) { StrAllocCat(me->base_href, "localhost"); } } else { if (isFILE_URL(me->base_href)) { StrAllocCat(me->base_href, "//localhost"); } else if (strcmp(me->base_href, STR_NEWS_URL)) { FREE(temp); StrAllocCat(me->base_href, (temp = HTParse(related, "", PARSE_HOST + PARSE_PUNCTUATION))); } } FREE(temp); /* * Create the path field. */ temp = HTParse(base, "", PARSE_PATH + PARSE_PUNCTUATION); if (*temp != '\0') { char *p = StrChr(temp, '?'); if (p) *p = '\0'; p = strrchr(temp, '/'); if (p) *(p + 1) = '\0'; /* strip after the last slash */ StrAllocCat(me->base_href, temp); } else if (!strcmp(me->base_href, STR_NEWS_URL)) { StrAllocCat(me->base_href, "*"); } else if (isNEWS_URL(me->base_href) || isNNTP_URL(me->base_href) || isSNEWS_URL(me->base_href)) { StrAllocCat(me->base_href, "/*"); } else { StrAllocCat(me->base_href, "/"); } FREE(temp); FREE(base); me->inBASE = TRUE; me->node_anchor->inBASE = TRUE; StrAllocCopy(me->node_anchor->content_base, me->base_href); /* me->base_href is a valid URL */ CTRACE((tfp, "*HTML_BASE: final href=`%s'\n", me->base_href)); } break; case HTML_META: if (present) LYHandleMETA(me, present, value, include); break; case HTML_TITLE: HTChunkClear(&me->title); break; case HTML_LINK: intern_flag = FALSE; if (present && present[HTML_LINK_HREF]) { CHECK_FOR_INTERN(intern_flag, value[HTML_LINK_HREF]); /* * Prepare to do housekeeping on the reference. - FM */ if (isEmpty(value[HTML_LINK_HREF])) { Base = (me->inBASE) ? me->base_href : me->node_anchor->address; StrAllocCopy(href, Base); } else { StrAllocCopy(href, value[HTML_LINK_HREF]); (void) LYLegitimizeHREF(me, &href, TRUE, TRUE); Base = (me->inBASE && *href != '\0' && *href != '#') ? me->base_href : me->node_anchor->address; HTParseALL(&href, Base); } /* * Handle links with a REV attribute. - FM * Handle REV="made" or REV="owner". 
- LM & FM * Handle REL="author" -TD */ if (present && ((present[HTML_LINK_REV] && value[HTML_LINK_REV] && (!strcasecomp("made", value[HTML_LINK_REV]) || !strcasecomp("owner", value[HTML_LINK_REV]))) || (present[HTML_LINK_REL] && value[HTML_LINK_REL] && (!strcasecomp("author", value[HTML_LINK_REL]))))) { /* * Load the owner element. - FM */ HTAnchor_setOwner(me->node_anchor, href); CTRACE((tfp, "HTML: DOC OWNER '%s' found\n", href)); FREE(href); /* * Load the RevTitle element if a TITLE attribute and value * are present. - FM */ if (present && present[HTML_LINK_TITLE] && value[HTML_LINK_TITLE] && *value[HTML_LINK_TITLE] != '\0') { StrAllocCopy(title, value[HTML_LINK_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); if (*title != '\0') HTAnchor_setRevTitle(me->node_anchor, title); FREE(title); } break; } /* * Handle REL links. - FM */ if (present && present[HTML_LINK_REL] && value[HTML_LINK_REL]) { /* * Ignore style sheets, for now. - FM * * lss and css have different syntax - lynx shouldn't try to * parse them now (it tries to parse them as lss, so it exits * with error message on the 1st non-empty line) - VH */ #ifndef USE_COLOR_STYLE if (!strcasecomp(value[HTML_LINK_REL], "StyleSheet") || !strcasecomp(value[HTML_LINK_REL], "Style")) { CTRACE2(TRACE_STYLE, (tfp, "HTML: StyleSheet link found.\n")); CTRACE2(TRACE_STYLE, (tfp, " StyleSheets not yet implemented.\n")); FREE(href); break; } #endif /* ! USE_COLOR_STYLE */ /* * Ignore anything not registered in the 28-Mar-95 IETF HTML * 3.0 draft and W3C HTML 3.2 draft, or not appropriate for * Lynx banner links in the expired Maloney and Quin relrev * draft. We'll make this more efficient when the situation * stabilizes, and for now, we'll treat "Banner" as another * toolbar element. 
- FM */ if (!strcasecomp(value[HTML_LINK_REL], "Home") || !strcasecomp(value[HTML_LINK_REL], "ToC") || !strcasecomp(value[HTML_LINK_REL], "Contents") || !strcasecomp(value[HTML_LINK_REL], "Index") || !strcasecomp(value[HTML_LINK_REL], "Glossary") || !strcasecomp(value[HTML_LINK_REL], "Copyright") || !strcasecomp(value[HTML_LINK_REL], "Help") || !strcasecomp(value[HTML_LINK_REL], "Search") || !strcasecomp(value[HTML_LINK_REL], "Bookmark") || !strcasecomp(value[HTML_LINK_REL], "Banner") || !strcasecomp(value[HTML_LINK_REL], "Top") || !strcasecomp(value[HTML_LINK_REL], "Origin") || !strcasecomp(value[HTML_LINK_REL], "Navigator") || !strcasecomp(value[HTML_LINK_REL], "Disclaimer") || !strcasecomp(value[HTML_LINK_REL], "Author") || !strcasecomp(value[HTML_LINK_REL], "Editor") || !strcasecomp(value[HTML_LINK_REL], "Publisher") || !strcasecomp(value[HTML_LINK_REL], "Trademark") || !strcasecomp(value[HTML_LINK_REL], "Hotlist") || !strcasecomp(value[HTML_LINK_REL], "Begin") || !strcasecomp(value[HTML_LINK_REL], "First") || !strcasecomp(value[HTML_LINK_REL], "End") || !strcasecomp(value[HTML_LINK_REL], "Last") || !strcasecomp(value[HTML_LINK_REL], "Documentation") || !strcasecomp(value[HTML_LINK_REL], "Biblioentry") || !strcasecomp(value[HTML_LINK_REL], "Bibliography") || !strcasecomp(value[HTML_LINK_REL], "Start") || !strcasecomp(value[HTML_LINK_REL], "Appendix")) { StrAllocCopy(title, value[HTML_LINK_REL]); pdoctitle = &title; /* for setting HTAnchor's title */ } else if (!strcasecomp(value[HTML_LINK_REL], "Up") || !strcasecomp(value[HTML_LINK_REL], "Next") || !strcasecomp(value[HTML_LINK_REL], "Previous") || !strcasecomp(value[HTML_LINK_REL], "Prev") || !strcasecomp(value[HTML_LINK_REL], "Child") || !strcasecomp(value[HTML_LINK_REL], "Sibling") || !strcasecomp(value[HTML_LINK_REL], "Parent") || !strcasecomp(value[HTML_LINK_REL], "Meta") || !strcasecomp(value[HTML_LINK_REL], "URC") || !strcasecomp(value[HTML_LINK_REL], "Pointer") || !strcasecomp(value[HTML_LINK_REL], 
"Translation") || !strcasecomp(value[HTML_LINK_REL], "Definition") || !strcasecomp(value[HTML_LINK_REL], "Alternate") || !strcasecomp(value[HTML_LINK_REL], "Section") || !strcasecomp(value[HTML_LINK_REL], "Subsection") || !strcasecomp(value[HTML_LINK_REL], "Chapter")) { StrAllocCopy(title, value[HTML_LINK_REL]); /* not setting target HTAnchor's title, for these links of highly relative character. Instead, try to remember the REL attribute as a property of the link (but not the destination), in the (otherwise underused) link type in a special format; the LIST page generation code may later use it. - kw */ if (!intern_flag) { StrAllocCopy(temp, "RelTitle: "); StrAllocCat(temp, value[HTML_LINK_REL]); } #ifndef DISABLE_BIBP } else if (!strcasecomp(value[HTML_LINK_REL], "citehost")) { /* Citehost determination for bibp links. - RDC */ HTAnchor_setCitehost(me->node_anchor, href); CTRACE((tfp, "HTML: citehost '%s' found\n", href)); FREE(href); break; #endif } else { CTRACE((tfp, "HTML: LINK with REL=\"%s\" ignored.\n", value[HTML_LINK_REL])); FREE(href); break; } } } else if (present && present[HTML_LINK_REL] && value[HTML_LINK_REL]) { /* * If no HREF was specified, handle special REL links with * self-designated HREFs. - FM */ if (!strcasecomp(value[HTML_LINK_REL], "Home")) { StrAllocCopy(href, LynxHome); } else if (!strcasecomp(value[HTML_LINK_REL], "Help")) { StrAllocCopy(href, helpfile); } else if (!strcasecomp(value[HTML_LINK_REL], "Index")) { StrAllocCopy(href, indexfile); } else { CTRACE((tfp, "HTML: LINK with REL=\"%s\" and no HREF ignored.\n", value[HTML_LINK_REL])); break; } StrAllocCopy(title, value[HTML_LINK_REL]); pdoctitle = &title; } if (href) { /* * Create a title (link name) from the TITLE value, if present, or * default to the REL value that was loaded into title. 
- FM */ if (present && present[HTML_LINK_TITLE] && non_empty(value[HTML_LINK_TITLE])) { StrAllocCopy(title, value[HTML_LINK_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); pdoctitle = &title; FREE(temp); /* forget about recording RelTitle - kw */ } if (isEmpty(title)) { FREE(href); FREE(title); break; } if (me->inA) { /* * Ugh! The LINK tag, which is a HEAD element, is in an * Anchor, which is BODY element. All we can do is close the * Anchor and cross our fingers. - FM */ SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } /* * Create anchors for the links that simulate a toolbar. - FM */ me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (temp ? (HTLinkType *) HTAtom_for(temp) : INTERN_LT)); /* Type */ FREE(temp); if ((dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) )) != NULL) { if (pdoctitle && !HTAnchor_title(dest)) HTAnchor_setTitle(dest, *pdoctitle); /* Don't allow CHARSET attribute to change *this* document's charset assumption. - kw */ if (dest == me->node_anchor) dest = NULL; if (present[HTML_LINK_CHARSET] && non_empty(value[HTML_LINK_CHARSET])) { dest_char_set = UCGetLYhndl_byMIME(value[HTML_LINK_CHARSET]); if (dest_char_set < 0) dest_char_set = UCLYhndl_for_unrec; } if (dest && dest_char_set >= 0) HTAnchor_setUCInfoStage(dest, dest_char_set, UCT_STAGE_PARSER, UCT_SETBY_LINK); } UPDATE_STYLE; if (!HText_hasToolbar(me->text) && (ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ LYToolbarName, /* Tag */ NULL, /* Addresss */ (HTLinkType *) 0))) { /* Type */ HText_appendCharacter(me->text, '#'); HText_setLastChar(me->text, ' '); /* absorb white space */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); HText_setToolbar(me->text); } else { /* * Add collapsible space to separate link from previous * generated links. 
- kw */ HTML_put_character(me, ' '); } HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); #ifdef USE_COLOR_STYLE if (present && present[HTML_LINK_CLASS] && non_empty(value[HTML_LINK_CLASS])) { char *tmp = 0; HTSprintf0(&tmp, "link.%s.%s", value[HTML_LINK_CLASS], title); CTRACE2(TRACE_STYLE, (tfp, "STYLE.link: using style <%s>\n", tmp)); HText_characterStyle(me->text, hash_code(tmp), STACK_ON); HTML_put_string(me, title); HTML_put_string(me, " ("); HTML_put_string(me, value[HTML_LINK_CLASS]); HTML_put_string(me, ")"); HText_characterStyle(me->text, hash_code(tmp), STACK_OFF); FREE(tmp); } else #endif HTML_put_string(me, title); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, 0); } FREE(href); FREE(title); break; case HTML_ISINDEX: if (((present)) && ((present[HTML_ISINDEX_HREF] && value[HTML_ISINDEX_HREF]) || (present[HTML_ISINDEX_ACTION] && value[HTML_ISINDEX_ACTION]))) { /* * Lynx was supporting ACTION, which never made it into the HTML * 2.0 specs. HTML 3.0 uses HREF, so we'll use that too, but allow * use of ACTION as an alternate until people have fully switched * over. - FM */ if (present[HTML_ISINDEX_HREF] && value[HTML_ISINDEX_HREF]) StrAllocCopy(href, value[HTML_ISINDEX_HREF]); else StrAllocCopy(href, value[HTML_ISINDEX_ACTION]); LYLegitimizeHREF(me, &href, TRUE, TRUE); Base = (me->inBASE && *href != '\0' && *href != '#') ? me->base_href : me->node_anchor->address; HTParseALL(&href, Base); HTAnchor_setIndex(me->node_anchor, href); FREE(href); } else { Base = (me->inBASE) ? me->base_href : me->node_anchor->address; HTAnchor_setIndex(me->node_anchor, Base); } /* * Support HTML 3.0 PROMPT attribute. 
- FM */ if (present && present[HTML_ISINDEX_PROMPT] && non_empty(value[HTML_ISINDEX_PROMPT])) { StrAllocCopy(temp, value[HTML_ISINDEX_PROMPT]); TRANSLATE_AND_UNESCAPE_ENTITIES(&temp, TRUE, FALSE); LYTrimHead(temp); LYTrimTail(temp); if (*temp != '\0') { StrAllocCat(temp, " "); HTAnchor_setPrompt(me->node_anchor, temp); } else { HTAnchor_setPrompt(me->node_anchor, ENTER_DATABASE_QUERY); } FREE(temp); } else { HTAnchor_setPrompt(me->node_anchor, ENTER_DATABASE_QUERY); } break; case HTML_NEXTID: break; case HTML_STYLE: /* * We're getting it as Literal text, which, for now, we'll just ignore. * - FM */ HTChunkClear(&me->style_block); break; case HTML_SCRIPT: /* * We're getting it as Literal text, which, for now, we'll just ignore. * - FM */ HTChunkClear(&me->script); break; case HTML_BODY: CHECK_ID(HTML_BODY_ID); if (HText_hasToolbar(me->text)) HText_appendParagraph(me->text); break; case HTML_SECTION: case HTML_ARTICLE: case HTML_MAIN: case HTML_ASIDE: case HTML_HEADER: case HTML_FOOTER: case HTML_NAV: CHECK_ID(HTML_GEN5_ID); if (HText_hasToolbar(me->text)) HText_appendParagraph(me->text); break; case HTML_FIGURE: CHECK_ID(HTML_GEN5_ID); break; case HTML_FRAMESET: break; case HTML_FRAME: if (present && present[HTML_FRAME_NAME] && non_empty(value[HTML_FRAME_NAME])) { StrAllocCopy(id_string, value[HTML_FRAME_NAME]); TRANSLATE_AND_UNESCAPE_ENTITIES(&id_string, TRUE, FALSE); LYTrimHead(id_string); LYTrimTail(id_string); } if (present && present[HTML_FRAME_SRC] && non_empty(value[HTML_FRAME_SRC])) { StrAllocCopy(href, value[HTML_FRAME_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ CAN_JUSTIFY_PUSH(FALSE); LYEnsureSingleSpace(me); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "FRAME:"); if 
(me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; CHECK_ID(HTML_FRAME_ID); HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, (id_string ? id_string : href)); FREE(href); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, 0); LYEnsureSingleSpace(me); CAN_JUSTIFY_POP; } else { CHECK_ID(HTML_FRAME_ID); } FREE(id_string); break; case HTML_NOFRAMES: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); break; case HTML_IFRAME: if (present && present[HTML_IFRAME_NAME] && non_empty(value[HTML_IFRAME_NAME])) { StrAllocCopy(id_string, value[HTML_IFRAME_NAME]); TRANSLATE_AND_UNESCAPE_ENTITIES(&id_string, TRUE, FALSE); LYTrimHead(id_string); LYTrimTail(id_string); } if (present && present[HTML_IFRAME_SRC] && non_empty(value[HTML_IFRAME_SRC])) { StrAllocCopy(href, value[HTML_IFRAME_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); if (me->inA) HTML_end_element(me, HTML_A, include); me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ LYEnsureDoubleSpace(me); CAN_JUSTIFY_PUSH_F LYResetParagraphAlignment(me); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "IFRAME:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; CHECK_ID(HTML_IFRAME_ID); HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, (id_string ? 
id_string : href)); FREE(href); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, 0); LYEnsureSingleSpace(me); CAN_JUSTIFY_POP; } else { CHECK_ID(HTML_IFRAME_ID); } FREE(id_string); break; case HTML_BANNER: case HTML_MARQUEE: change_paragraph_style(me, styles[HTML_BANNER]); UPDATE_STYLE; if (me->sp->tag_number == (int) ElementNumber) LYEnsureDoubleSpace(me); /* * Treat this as a toolbar if we don't have one yet, and we are in the * first half of the first page. - FM */ if ((!HText_hasToolbar(me->text) && HText_getLines(me->text) < (display_lines / 2)) && (ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ LYToolbarName, /* Tag */ NULL, /* Addresss */ (HTLinkType *) 0))) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); HText_setToolbar(me->text); } CHECK_ID(HTML_GEN_ID); break; case HTML_CENTER: case HTML_DIV: if (me->Division_Level < (MAX_NESTING - 1)) { me->Division_Level++; } else { CTRACE((tfp, "HTML: ****** Maximum nesting of %d divisions exceeded!\n", MAX_NESTING)); } if (me->inP) LYEnsureSingleSpace(me); /* always at least break line - kw */ if (ElementNumber == HTML_CENTER) { me->DivisionAlignments[me->Division_Level] = HT_CENTER; change_paragraph_style(me, styles[HTML_DCENTER]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DCENTER]->alignment; } else if (me->List_Nesting_Level >= 0 && !(present && present[HTML_DIV_ALIGN] && value[HTML_DIV_ALIGN] && (!strcasecomp(value[HTML_DIV_ALIGN], "center") || !strcasecomp(value[HTML_DIV_ALIGN], "right")))) { if (present && present[HTML_DIV_ALIGN]) me->current_default_alignment = HT_LEFT; else if (me->Division_Level == 0) me->current_default_alignment = HT_LEFT; else if (me->sp[0].tag_number == HTML_UL || me->sp[0].tag_number == HTML_OL || me->sp[0].tag_number == HTML_MENU || me->sp[0].tag_number == HTML_DIR || me->sp[0].tag_number == HTML_LI || me->sp[0].tag_number == HTML_LH || 
me->sp[0].tag_number == HTML_DD) me->current_default_alignment = HT_LEFT; LYHandlePlike(me, present, value, include, HTML_DIV_ALIGN, TRUE); me->DivisionAlignments[me->Division_Level] = (short) me->current_default_alignment; } else if (present && present[HTML_DIV_ALIGN] && non_empty(value[HTML_DIV_ALIGN])) { if (!strcasecomp(value[HTML_DIV_ALIGN], "center")) { me->DivisionAlignments[me->Division_Level] = HT_CENTER; change_paragraph_style(me, styles[HTML_DCENTER]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DCENTER]->alignment; } else if (!strcasecomp(value[HTML_DIV_ALIGN], "right")) { me->DivisionAlignments[me->Division_Level] = HT_RIGHT; change_paragraph_style(me, styles[HTML_DRIGHT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DRIGHT]->alignment; } else { me->DivisionAlignments[me->Division_Level] = HT_LEFT; change_paragraph_style(me, styles[HTML_DLEFT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DLEFT]->alignment; } } else { me->DivisionAlignments[me->Division_Level] = HT_LEFT; change_paragraph_style(me, styles[HTML_DLEFT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DLEFT]->alignment; } CHECK_ID(HTML_DIV_ID); break; case HTML_H1: case HTML_H2: case HTML_H3: case HTML_H4: case HTML_H5: case HTML_H6: /* * Close the previous style if not done by HTML doc. Added to get rid * of core dumps in BAD HTML on the net. * GAB 07-07-94 * But then again, these are actually allowed to nest. I guess I have * to depend on the HTML writers correct style. * GAB 07-12-94 if (i_prior_style != -1) { HTML_end_element(me, i_prior_style); } i_prior_style = ElementNumber; */ /* * Check whether we have an H# in a list, and if so, treat it as an LH. 
* - FM */ if ((me->List_Nesting_Level >= 0) && (me->sp[0].tag_number == HTML_UL || me->sp[0].tag_number == HTML_OL || me->sp[0].tag_number == HTML_MENU || me->sp[0].tag_number == HTML_DIR || me->sp[0].tag_number == HTML_LI)) { if (HTML_dtd.tags[HTML_LH].contents == SGML_EMPTY) { ElementNumber = HTML_LH; } else { me->new_style = me->sp[0].style; ElementNumber = (HTMLElement) me->sp[0].tag_number; UPDATE_STYLE; } /* * Some authors use H# headers as a substitute for FONT, so check * if this one immediately followed an LI. If so, both me->inP and * me->in_word will be FALSE (though the line might not be empty * due to a bullet and/or nbsp) and we can assume it is just for a * FONT change. We thus will not create another line break nor add * to the current left indentation. - FM */ if (!(me->inP == FALSE && me->in_word == NO)) { HText_appendParagraph(me->text); HTML_put_character(me, HT_NON_BREAK_SPACE); HText_setLastChar(me->text, ' '); me->in_word = NO; me->inP = FALSE; } CHECK_ID(HTML_H_ID); break; } if (present && present[HTML_H_ALIGN] && non_empty(value[HTML_H_ALIGN])) { if (!strcasecomp(value[HTML_H_ALIGN], "center")) change_paragraph_style(me, styles[HTML_HCENTER]); else if (!strcasecomp(value[HTML_H_ALIGN], "right")) change_paragraph_style(me, styles[HTML_HRIGHT]); else if (!strcasecomp(value[HTML_H_ALIGN], "left") || !strcasecomp(value[HTML_H_ALIGN], "justify")) change_paragraph_style(me, styles[HTML_HLEFT]); else change_paragraph_style(me, styles[ElementNumber]); } else if (me->Division_Level >= 0) { if (me->DivisionAlignments[me->Division_Level] == HT_CENTER) { change_paragraph_style(me, styles[HTML_HCENTER]); } else if (me->DivisionAlignments[me->Division_Level] == HT_LEFT) { change_paragraph_style(me, styles[HTML_HLEFT]); } else if (me->DivisionAlignments[me->Division_Level] == HT_RIGHT) { change_paragraph_style(me, styles[HTML_HRIGHT]); } } else { change_paragraph_style(me, styles[ElementNumber]); } UPDATE_STYLE; CHECK_ID(HTML_H_ID); if ((bold_headers == 
TRUE || (ElementNumber == HTML_H1 && bold_H1 == TRUE)) && (styles[ElementNumber]->font & HT_BOLD)) { if (me->inBoldA == FALSE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_START_CHAR); } me->inBoldH = TRUE; } break; case HTML_P: LYHandlePlike(me, present, value, include, HTML_P_ALIGN, TRUE); CHECK_ID(HTML_P_ID); break; case HTML_BR: UPDATE_STYLE; CHECK_ID(HTML_GEN_ID); /* Add a \r (new line) if these conditions are true: * * We are not collapsing BR's (and either we are not trimming * blank lines, or the preceding line is non-empty), or * * The current line has text on it. * Otherwise, don't do anything. -DH 19980814, TD 19980827/20170704 */ if ((LYCollapseBRs == FALSE && (!LYtrimBlankLines || !HText_PreviousLineEmpty(me->text, FALSE))) || !HText_LastLineEmpty(me->text, FALSE)) { HText_setLastChar(me->text, ' '); /* absorb white space */ HText_appendCharacter(me->text, '\r'); } me->in_word = NO; me->inP = FALSE; break; case HTML_WBR: UPDATE_STYLE; CHECK_ID(HTML_GEN_ID); HText_setBreakPoint(me->text); break; case HTML_HY: case HTML_SHY: UPDATE_STYLE; CHECK_ID(HTML_GEN_ID); HText_appendCharacter(me->text, LY_SOFT_HYPHEN); break; case HTML_HR: { int width; /* * Start a new line only if we had printable characters following * the previous newline, or remove the previous line if both it and * the last line are blank. - FM */ UPDATE_STYLE; if (!HText_LastLineEmpty(me->text, FALSE)) { HText_setLastChar(me->text, ' '); /* absorb white space */ HText_appendCharacter(me->text, '\r'); } else if (HText_PreviousLineEmpty(me->text, FALSE)) { HText_RemovePreviousLine(me->text); } me->in_word = NO; me->inP = FALSE; /* * Add an ID link if needed. - FM */ CHECK_ID(HTML_HR_ID); /* * Center lines within the current margins, if a right or left * ALIGNment is not specified. If WIDTH="#%" is given and not * garbage, use that to calculate the width, otherwise use the * default width. 
- FM */ if (present && present[HTML_HR_ALIGN] && value[HTML_HR_ALIGN]) { if (!strcasecomp(value[HTML_HR_ALIGN], "right")) { me->sp->style->alignment = HT_RIGHT; } else if (!strcasecomp(value[HTML_HR_ALIGN], "left")) { me->sp->style->alignment = HT_LEFT; } else { me->sp->style->alignment = HT_CENTER; } } else { me->sp->style->alignment = HT_CENTER; } width = LYcolLimit - me->new_style->leftIndent - me->new_style->rightIndent; if (present && present[HTML_HR_WIDTH] && value[HTML_HR_WIDTH] && isdigit(UCH(*value[HTML_HR_WIDTH])) && value[HTML_HR_WIDTH][strlen(value[HTML_HR_WIDTH]) - 1] == '%') { char *percent = NULL; int Percent, Width; StrAllocCopy(percent, value[HTML_HR_WIDTH]); percent[strlen(percent) - 1] = '\0'; Percent = atoi(percent); if (Percent > 100 || Percent < 1) width -= 5; else { Width = (width * Percent) / 100; if (Width < 1) width = 1; else width = Width; } FREE(percent); } else { width -= 5; } for (i = 0; i < width; i++) HTML_put_character(me, '_'); HText_appendCharacter(me->text, '\r'); me->in_word = NO; me->inP = FALSE; /* * Reset the alignment appropriately for the division and/or block. * - FM */ if (me->List_Nesting_Level < 0 && me->Division_Level >= 0) { me->sp->style->alignment = me->DivisionAlignments[me->Division_Level]; } else if (me->sp->style->id == ST_HeadingCenter || me->sp->style->id == ST_Heading1) { me->sp->style->alignment = HT_CENTER; } else if (me->sp->style->id == ST_HeadingRight) { me->sp->style->alignment = HT_RIGHT; } else { me->sp->style->alignment = HT_LEFT; } /* * Add a blank line and set the second line indentation for lists * and addresses, or a paragraph separator for other blocks. - FM */ if (me->List_Nesting_Level >= 0 || me->sp[0].tag_number == HTML_ADDRESS) { HText_setLastChar(me->text, ' '); /* absorb white space */ HText_appendCharacter(me->text, '\r'); } else { HText_appendParagraph(me->text); } } break; case HTML_TAB: if (!present) { /* Bad tag. Must have at least one attribute. 
- FM */ CTRACE((tfp, "HTML: TAB tag has no attributes. Ignored.\n")); break; } /* * If page author is using TAB within a TABLE, it's probably formatted * specifically to work well for Lynx without simple table tracking * code. Cancel tracking, it would only make things worse. - kw */ HText_cancelStbl(me->text); UPDATE_STYLE; CANT_JUSTIFY_THIS_LINE; if (present[HTML_TAB_ALIGN] && value[HTML_TAB_ALIGN] && (strcasecomp(value[HTML_TAB_ALIGN], "left") || !(present[HTML_TAB_TO] || present[HTML_TAB_INDENT]))) { /* * Just ensure a collapsible space, until we have the ALIGN and DP * attributes implemented. - FM */ HTML_put_character(me, ' '); CTRACE((tfp, "HTML: ALIGN not 'left'. Using space instead of TAB.\n")); } else if (!LYoverride_default_alignment(me) && me->current_default_alignment != HT_LEFT) { /* * Just ensure a collapsible space, until we can replace * HText_getCurrentColumn() in GridText.c with code which doesn't * require that the alignment be HT_LEFT. - FM */ HTML_put_character(me, ' '); CTRACE((tfp, "HTML: Not HT_LEFT. Using space instead of TAB.\n")); } else if ((present[HTML_TAB_TO] && non_empty(value[HTML_TAB_TO])) || (present[HTML_TAB_INDENT] && value[HTML_TAB_INDENT] && isdigit(UCH(*value[HTML_TAB_INDENT])))) { int column, target = -1; int enval = 2; column = HText_getCurrentColumn(me->text); if (present[HTML_TAB_TO] && non_empty(value[HTML_TAB_TO])) { /* * TO has priority over INDENT if both are present. - FM */ StrAllocCopy(temp, value[HTML_TAB_TO]); TRANSLATE_AND_UNESCAPE_TO_STD(&temp); if (*temp) { target = HText_getTabIDColumn(me->text, temp); } } else if (isEmpty(temp) && present[HTML_TAB_INDENT] && value[HTML_TAB_INDENT] && isdigit(UCH(*value[HTML_TAB_INDENT]))) { /* * The INDENT value is in "en" (enval per column) units. * Divide it by enval, rounding odd values up. 
- FM */ target = (int) (((1.0 * atoi(value[HTML_TAB_INDENT])) / enval) + (0.5)); } FREE(temp); /* * If we are being directed to a column too far to the left or * right, just add a collapsible space, otherwise, add the * appropriate number of spaces. - FM */ if (target < column || target > HText_getMaximumColumn(me->text)) { HTML_put_character(me, ' '); CTRACE((tfp, "HTML: Column out of bounds. Using space instead of TAB.\n")); } else { for (i = column; i < target; i++) HText_appendCharacter(me->text, ' '); HText_setLastChar(me->text, ' '); /* absorb white space */ } } me->in_word = NO; /* * If we have an ID attribute, save it together with the value of the * column we've reached. - FM */ if (present[HTML_TAB_ID] && non_empty(value[HTML_TAB_ID])) { StrAllocCopy(temp, value[HTML_TAB_ID]); TRANSLATE_AND_UNESCAPE_TO_STD(&temp); if (*temp) HText_setTabID(me->text, temp); FREE(temp); } break; case HTML_BASEFONT: break; case HTML_FONT: /* * FONT *may* have been declared SGML_EMPTY in HTMLDTD.c, and * SGML_character() in SGML.c *may* check for a FONT end tag to call * HTML_end_element() directly (with a check in that to bypass * decrementing of the HTML parser's stack). Or this may have been * really a </FONT> end tag, for which some incarnations of SGML.c * would fake a <FONT> start tag instead. - fm & kw * * But if we have an open FONT, DON'T close that one now, since FONT * tags can be legally nested AFAIK, and Lynx currently doesn't do * anything with them anyway... - kw */ #ifdef NOTUSED_FOTEMODS if (me->inFONT == TRUE) HTML_end_element(me, HTML_FONT, &include); #endif /* NOTUSED_FOTEMODS */ /* * Set flag to know we are in a FONT container, and add code to do * something about it, someday. 
- FM */ me->inFONT = TRUE; break; case HTML_B: /* Physical character highlighting */ case HTML_BLINK: case HTML_I: case HTML_U: case HTML_CITE: /* Logical character highlighting */ case HTML_EM: case HTML_STRONG: UPDATE_STYLE; me->Underline_Level++; CHECK_ID(HTML_GEN_ID); /* * Ignore this if inside of a bold anchor or header. Can't display * both underline and bold at same time. */ if (me->inBoldA == TRUE || me->inBoldH == TRUE) { CTRACE((tfp, "Underline Level is %d\n", me->Underline_Level)); break; } if (me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); me->inUnderline = TRUE; CTRACE((tfp, "Beginning underline\n")); } else { CTRACE((tfp, "Underline Level is %d\n", me->Underline_Level)); } break; case HTML_ABBR: /* Miscellaneous character containers */ case HTML_ACRONYM: case HTML_AU: case HTML_AUTHOR: case HTML_BIG: case HTML_CODE: case HTML_DFN: case HTML_KBD: case HTML_SAMP: case HTML_SMALL: case HTML_TT: case HTML_VAR: CHECK_ID(HTML_GEN_ID); break; /* ignore */ case HTML_SUP: HText_appendCharacter(me->text, '^'); CHECK_ID(HTML_GEN_ID); break; case HTML_SUB: HText_appendCharacter(me->text, '['); CHECK_ID(HTML_GEN_ID); break; case HTML_DEL: case HTML_S: case HTML_STRIKE: CHECK_ID(HTML_GEN_ID); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "[DEL:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; break; case HTML_INS: CHECK_ID(HTML_GEN_ID); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "[INS:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; break; case HTML_Q: CHECK_ID(HTML_GEN_ID); /* * Should check LANG and/or DIR attributes, and the * me->node_anchor->charset and/or yet to be added structure elements, * to determine whether we should 
use chevrons, but for now we'll * always use double- or single-quotes. - FM */ if (!(me->Quote_Level & 1)) HTML_put_character(me, '"'); else HTML_put_character(me, '`'); me->Quote_Level++; break; case HTML_PRE: /* Formatted text */ /* * Set our inPRE flag to FALSE so that a newline immediately following * the PRE start tag will be ignored. HTML_put_character() will set it * to TRUE when the first character within the PRE block is received. * - FM */ me->inPRE = FALSE; /* FALLTHRU */ case HTML_LISTING: /* Literal text */ /* FALLTHRU */ case HTML_XMP: /* FALLTHRU */ case HTML_PLAINTEXT: change_paragraph_style(me, styles[ElementNumber]); UPDATE_STYLE; CHECK_ID(HTML_GEN_ID); if (me->comment_end) HText_appendText(me->text, me->comment_end); break; case HTML_BLOCKQUOTE: case HTML_BQ: change_paragraph_style(me, styles[ElementNumber]); UPDATE_STYLE; if (me->sp->tag_number == (int) ElementNumber) LYEnsureDoubleSpace(me); CHECK_ID(HTML_BQ_ID); break; case HTML_NOTE: change_paragraph_style(me, styles[ElementNumber]); UPDATE_STYLE; if (me->sp->tag_number == (int) ElementNumber) LYEnsureDoubleSpace(me); CHECK_ID(HTML_NOTE_ID); { char *note = NULL; /* * Indicate the type of NOTE. 
*/ if (present && present[HTML_NOTE_CLASS] && value[HTML_NOTE_CLASS] && (!strcasecomp(value[HTML_NOTE_CLASS], "CAUTION") || !strcasecomp(value[HTML_NOTE_CLASS], "WARNING"))) { StrAllocCopy(note, value[HTML_NOTE_CLASS]); LYUpperCase(note); StrAllocCat(note, ":"); } else if (present && present[HTML_NOTE_ROLE] && value[HTML_NOTE_ROLE] && (!strcasecomp(value[HTML_NOTE_ROLE], "CAUTION") || !strcasecomp(value[HTML_NOTE_ROLE], "WARNING"))) { StrAllocCopy(note, value[HTML_NOTE_ROLE]); LYUpperCase(note); StrAllocCat(note, ":"); } else { StrAllocCopy(note, "NOTE:"); } if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, note); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); CAN_JUSTIFY_START; FREE(note); } CAN_JUSTIFY_START; me->inLABEL = TRUE; me->in_word = NO; me->inP = FALSE; break; case HTML_ADDRESS: if (me->List_Nesting_Level < 0) { change_paragraph_style(me, styles[ElementNumber]); UPDATE_STYLE; if (me->sp->tag_number == (int) ElementNumber) LYEnsureDoubleSpace(me); } else { LYHandlePlike(me, present, value, include, -1, TRUE); } CHECK_ID(HTML_ADDRESS_ID); break; case HTML_DL: me->List_Nesting_Level++; /* increment the List nesting level */ if (me->List_Nesting_Level <= 0) { change_paragraph_style(me, present && present[HTML_DL_COMPACT] ? styles[HTML_DLC] : styles[HTML_DL]); } else if (me->List_Nesting_Level >= 6) { change_paragraph_style(me, present && present[HTML_DL_COMPACT] ? styles[HTML_DLC6] : styles[HTML_DL6]); } else { change_paragraph_style(me, present && present[HTML_DL_COMPACT] ? 
styles[(HTML_DLC1 - 1) + me->List_Nesting_Level] : styles[(HTML_DL1 - 1) + me->List_Nesting_Level]); } UPDATE_STYLE; /* update to the new style */ CHECK_ID(HTML_DL_ID); break; case HTML_DLC: me->List_Nesting_Level++; /* increment the List nesting level */ if (me->List_Nesting_Level <= 0) { change_paragraph_style(me, styles[HTML_DLC]); } else if (me->List_Nesting_Level >= 6) { change_paragraph_style(me, styles[HTML_DLC6]); } else { change_paragraph_style(me, styles[(HTML_DLC1 - 1) + me->List_Nesting_Level]); } UPDATE_STYLE; /* update to the new style */ CHECK_ID(HTML_DL_ID); break; case HTML_DT: CHECK_ID(HTML_GEN_ID); if (!me->style_change) { BOOL in_line_1 = HText_inLineOne(me->text); HTCoord saved_spaceBefore = me->sp->style->spaceBefore; HTCoord saved_spaceAfter = me->sp->style->spaceAfter; /* * If there are several DT elements and this is not the first, and * the preceding DT element's first (and normally only) line has * not yet been ended, suppress intervening blank line by * temporarily modifying the paragraph style in place. Ugly but * there's ample precedence. - kw */ if (in_line_1) { me->sp->style->spaceBefore = 0; /* temporary change */ me->sp->style->spaceAfter = 0; /* temporary change */ } HText_appendParagraph(me->text); me->sp->style->spaceBefore = saved_spaceBefore; /* undo */ me->sp->style->spaceAfter = saved_spaceAfter; /* undo */ me->in_word = NO; me->sp->style->alignment = HT_LEFT; } me->inP = FALSE; break; case HTML_DD: CHECK_ID(HTML_GEN_ID); HText_setLastChar(me->text, ' '); /* absorb white space */ if (!me->style_change) { if (!HText_LastLineEmpty(me->text, FALSE)) { HText_appendCharacter(me->text, '\r'); } else { HText_NegateLineOne(me->text); } } else { UPDATE_STYLE; HText_appendCharacter(me->text, '\t'); } me->sp->style->alignment = HT_LEFT; me->in_word = NO; me->inP = FALSE; break; case HTML_OL: /* * Set the default TYPE. */ me->OL_Type[(me->List_Nesting_Level < 11 ? 
me->List_Nesting_Level + 1 : 11)] = '1'; /* * Check whether we have a starting sequence number, or want to * continue the numbering from a previous OL in this nest. - FM */ if (present && (present[HTML_OL_SEQNUM] || present[HTML_OL_START])) { int seqnum; /* * Give preference to the valid HTML 3.0 SEQNUM attribute name over * the Netscape START attribute name (too bad the Netscape * developers didn't read the HTML 3.0 specs before re-inventing * the "wheel" as "we'll"). - FM */ if (present[HTML_OL_SEQNUM] && non_empty(value[HTML_OL_SEQNUM])) { seqnum = atoi(value[HTML_OL_SEQNUM]); } else if (present[HTML_OL_START] && non_empty(value[HTML_OL_START])) { seqnum = atoi(value[HTML_OL_START]); } else { seqnum = 1; } /* * Don't allow negative numbers less than or equal to our flags, or * numbers less than 1 if an Alphabetic or Roman TYPE. - FM */ if (present[HTML_OL_TYPE] && value[HTML_OL_TYPE]) { if (*value[HTML_OL_TYPE] == 'A') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'A'; if (seqnum < 1) seqnum = 1; } else if (*value[HTML_OL_TYPE] == 'a') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'a'; if (seqnum < 1) seqnum = 1; } else if (*value[HTML_OL_TYPE] == 'I') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'I'; if (seqnum < 1) seqnum = 1; } else if (*value[HTML_OL_TYPE] == 'i') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'i'; if (seqnum < 1) seqnum = 1; } else { if (seqnum <= OL_VOID) seqnum = OL_VOID + 1; } } else if (seqnum <= OL_VOID) { seqnum = OL_VOID + 1; } me->OL_Counter[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = seqnum; } else if (present && present[HTML_OL_CONTINUE]) { me->OL_Counter[me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11] = OL_CONTINUE; } else { me->OL_Counter[(me->List_Nesting_Level < 11 ? 
me->List_Nesting_Level + 1 : 11)] = 1; if (present && present[HTML_OL_TYPE] && value[HTML_OL_TYPE]) { if (*value[HTML_OL_TYPE] == 'A') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'A'; } else if (*value[HTML_OL_TYPE] == 'a') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'a'; } else if (*value[HTML_OL_TYPE] == 'I') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'I'; } else if (*value[HTML_OL_TYPE] == 'i') { me->OL_Type[(me->List_Nesting_Level < 11 ? me->List_Nesting_Level + 1 : 11)] = 'i'; } } } me->List_Nesting_Level++; if (me->List_Nesting_Level <= 0) { change_paragraph_style(me, styles[ElementNumber]); } else if (me->List_Nesting_Level >= 6) { change_paragraph_style(me, styles[HTML_OL6]); } else { change_paragraph_style(me, styles[HTML_OL1 + me->List_Nesting_Level - 1]); } UPDATE_STYLE; /* update to the new style */ CHECK_ID(HTML_OL_ID); break; case HTML_UL: me->List_Nesting_Level++; if (me->List_Nesting_Level <= 0) { if (!(present && present[HTML_UL_PLAIN]) && !(present && present[HTML_UL_TYPE] && value[HTML_UL_TYPE] && 0 == strcasecomp(value[HTML_UL_TYPE], "PLAIN"))) { change_paragraph_style(me, styles[ElementNumber]); } else { change_paragraph_style(me, styles[HTML_DIR]); ElementNumber = HTML_DIR; } } else if (me->List_Nesting_Level >= 6) { if (!(present && present[HTML_UL_PLAIN]) && !(present && present[HTML_UL_TYPE] && value[HTML_UL_TYPE] && 0 == strcasecomp(value[HTML_UL_TYPE], "PLAIN"))) { change_paragraph_style(me, styles[HTML_OL6]); } else { change_paragraph_style(me, styles[HTML_MENU6]); ElementNumber = HTML_DIR; } } else { if (!(present && present[HTML_UL_PLAIN]) && !(present && present[HTML_UL_TYPE] && value[HTML_UL_TYPE] && 0 == strcasecomp(value[HTML_UL_TYPE], "PLAIN"))) { change_paragraph_style(me, styles[HTML_OL1 + me->List_Nesting_Level - 1]); } else { change_paragraph_style(me, styles[HTML_MENU1 + me->List_Nesting_Level - 1]); ElementNumber = 
HTML_DIR; } } UPDATE_STYLE; /* update to the new style */ CHECK_ID(HTML_UL_ID); break; case HTML_MENU: case HTML_DIR: me->List_Nesting_Level++; if (me->List_Nesting_Level <= 0) { change_paragraph_style(me, styles[ElementNumber]); } else if (me->List_Nesting_Level >= 6) { change_paragraph_style(me, styles[HTML_MENU6]); } else { change_paragraph_style(me, styles[HTML_MENU1 + me->List_Nesting_Level - 1]); } UPDATE_STYLE; /* update to the new style */ CHECK_ID(HTML_UL_ID); break; case HTML_LH: UPDATE_STYLE; /* update to the new style */ HText_appendParagraph(me->text); CHECK_ID(HTML_GEN_ID); HTML_put_character(me, HT_NON_BREAK_SPACE); HText_setLastChar(me->text, ' '); me->in_word = NO; me->inP = FALSE; break; case HTML_LI: UPDATE_STYLE; /* update to the new style */ HText_appendParagraph(me->text); me->sp->style->alignment = HT_LEFT; CHECK_ID(HTML_LI_ID); { int surrounding_tag_number = me->sp[0].tag_number; /* * No, a LI should never occur directly within another LI, but this * may result from incomplete error recovery. So check one more * surrounding level in this case. - kw */ if (surrounding_tag_number == HTML_LI && me->sp < (me->stack + MAX_NESTING - 1)) surrounding_tag_number = me->sp[1].tag_number; if (surrounding_tag_number == HTML_OL) { char number_string[20]; int counter, seqnum; char seqtype; counter = me->List_Nesting_Level < 11 ? 
me->List_Nesting_Level : 11; if (present && present[HTML_LI_TYPE] && value[HTML_LI_TYPE]) { if (*value[HTML_LI_TYPE] == '1') { me->OL_Type[counter] = '1'; } else if (*value[HTML_LI_TYPE] == 'A') { me->OL_Type[counter] = 'A'; } else if (*value[HTML_LI_TYPE] == 'a') { me->OL_Type[counter] = 'a'; } else if (*value[HTML_LI_TYPE] == 'I') { me->OL_Type[counter] = 'I'; } else if (*value[HTML_LI_TYPE] == 'i') { me->OL_Type[counter] = 'i'; } } if (present && present[HTML_LI_VALUE] && ((value[HTML_LI_VALUE] != NULL) && (*value[HTML_LI_VALUE] != '\0')) && ((isdigit(UCH(*value[HTML_LI_VALUE]))) || (*value[HTML_LI_VALUE] == '-' && isdigit(UCH(*(value[HTML_LI_VALUE] + 1)))))) { seqnum = atoi(value[HTML_LI_VALUE]); if (seqnum <= OL_VOID) seqnum = OL_VOID + 1; seqtype = me->OL_Type[counter]; if (seqtype != '1' && seqnum < 1) seqnum = 1; me->OL_Counter[counter] = seqnum + 1; } else if (me->OL_Counter[counter] >= OL_VOID) { seqnum = me->OL_Counter[counter]++; seqtype = me->OL_Type[counter]; if (seqtype != '1' && seqnum < 1) { seqnum = 1; me->OL_Counter[counter] = seqnum + 1; } } else { seqnum = me->Last_OL_Count + 1; seqtype = me->Last_OL_Type; for (i = (counter - 1); i >= 0; i--) { if (me->OL_Counter[i] > OL_VOID) { seqnum = me->OL_Counter[i]++; seqtype = me->OL_Type[i]; i = 0; } } } if (seqtype == 'A') { strcpy(number_string, LYUppercaseA_OL_String(seqnum)); } else if (seqtype == 'a') { strcpy(number_string, LYLowercaseA_OL_String(seqnum)); } else if (seqtype == 'I') { strcpy(number_string, LYUppercaseI_OL_String(seqnum)); } else if (seqtype == 'i') { strcpy(number_string, LYLowercaseI_OL_String(seqnum)); } else { sprintf(number_string, "%2d.", seqnum); } me->Last_OL_Count = seqnum; me->Last_OL_Type = seqtype; /* * Hack, because there is no append string! 
*/ for (i = 0; number_string[i] != '\0'; i++) if (number_string[i] == ' ') HTML_put_character(me, HT_NON_BREAK_SPACE); else HTML_put_character(me, number_string[i]); /* * Use HTML_put_character so that any other spaces coming * through will be collapsed. We'll use nbsp, so it won't * break at the spacing character if there are no spaces in the * subsequent text up to the right margin, but will declare it * as a normal space to ensure collapsing if a normal space * does immediately follow it. - FM */ HTML_put_character(me, HT_NON_BREAK_SPACE); HText_setLastChar(me->text, ' '); } else if (surrounding_tag_number == HTML_UL) { /* * Hack, because there is no append string! */ HTML_put_character(me, HT_NON_BREAK_SPACE); HTML_put_character(me, HT_NON_BREAK_SPACE); switch (me->List_Nesting_Level % 7) { case 0: HTML_put_character(me, '*'); break; case 1: HTML_put_character(me, '+'); break; case 2: HTML_put_character(me, 'o'); break; case 3: HTML_put_character(me, '#'); break; case 4: HTML_put_character(me, '@'); break; case 5: HTML_put_character(me, '-'); break; case 6: HTML_put_character(me, '='); break; } /* * Keep using HTML_put_character so that any other spaces * coming through will be collapsed. We use nbsp, so we won't * wrap at the spacing character if there are no spaces in the * subsequent text up to the right margin, but will declare it * as a normal space to ensure collapsing if a normal space * does immediately follow it. - FM */ HTML_put_character(me, HT_NON_BREAK_SPACE); HText_setLastChar(me->text, ' '); } else { /* * Hack, because there is no append string! */ HTML_put_character(me, HT_NON_BREAK_SPACE); HTML_put_character(me, HT_NON_BREAK_SPACE); HText_setLastChar(me->text, ' '); } } CAN_JUSTIFY_START; me->in_word = NO; me->inP = FALSE; break; case HTML_SPAN: CHECK_ID(HTML_GEN_ID); /* * Should check LANG and/or DIR attributes, and the * me->node_anchor->charset and/or yet to be added structure elements, * and do something here. 
- FM */ break; case HTML_BDO: CHECK_ID(HTML_GEN_ID); /* * Should check DIR (and LANG) attributes, and the * me->node_anchor->charset and/or yet to be added structure elements, * and do something here. - FM */ break; case HTML_SPOT: CHECK_ID(HTML_GEN_ID); break; case HTML_FN: change_paragraph_style(me, styles[ElementNumber]); UPDATE_STYLE; if (me->sp->tag_number == (int) ElementNumber) LYEnsureDoubleSpace(me); CHECK_ID(HTML_GEN_ID); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "FOOTNOTE:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); CAN_JUSTIFY_START me->inLABEL = TRUE; me->in_word = NO; me->inP = FALSE; break; case HTML_A: /* * If we are looking for client-side image maps, then handle an A * within a MAP that has a COORDS attribute as an AREA tag. * Unfortunately we lose the anchor text this way for the LYNXIMGMAP, * we would have to do much more parsing to collect it. After * potentially handling the A as AREA, always return immediately if * only looking for image maps, without pushing anything on the style * stack. - kw */ if (me->map_address && present && present[HTML_A_COORDS]) LYStartArea(me, present[HTML_A_HREF] ? value[HTML_A_HREF] : NULL, NULL, present[HTML_A_TITLE] ? value[HTML_A_TITLE] : NULL, tag_charset); if (LYMapsOnly) { return HT_OK; } /* * A may have been declared SGML_EMPTY in HTMLDTD.c, and * SGML_character() in SGML.c may check for an A end tag to call * HTML_end_element() directly (with a check in that to bypass * decrementing of the HTML parser's stack), so if we have an open A, * close that one now. - FM & kw */ if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } /* * Set to know we are in an anchor. */ me->inA = TRUE; /* * Load id_string if we have an ID or NAME. 
- FM */ if (present && present[HTML_A_ID] && non_empty(value[HTML_A_ID])) { StrAllocCopy(id_string, value[HTML_A_ID]); } else if (present && present[HTML_A_NAME] && non_empty(value[HTML_A_NAME])) { StrAllocCopy(id_string, value[HTML_A_NAME]); } if (id_string) TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); /* * Handle the reference. - FM */ if (present && present[HTML_A_HREF]) { /* * Set to know we are making the content bold. */ me->inBoldA = TRUE; if (isEmpty(value[HTML_A_HREF])) StrAllocCopy(href, "#"); else StrAllocCopy(href, value[HTML_A_HREF]); CHECK_FOR_INTERN(intern_flag, href); /* '#' */ if (intern_flag) { /*** FAST WAY: ***/ TRANSLATE_AND_UNESCAPE_TO_STD(&href); } else { url_type = LYLegitimizeHREF(me, &href, TRUE, TRUE); /* * Deal with our ftp gateway kludge. - FM */ if (!url_type && !StrNCmp(href, "/foo/..", 7) && (isFTP_URL(me->node_anchor->address) || isFILE_URL(me->node_anchor->address))) { for (i = 0; (href[i] = href[i + 7]) != 0; i++) ; } } if (present[HTML_A_ISMAP]) /*??? */ intern_flag = FALSE; } else { if (bold_name_anchors == TRUE) { me->inBoldA = TRUE; } } if (present && present[HTML_A_TYPE] && value[HTML_A_TYPE]) { StrAllocCopy(temp, value[HTML_A_TYPE]); if (!intern_flag && !strcasecomp(value[HTML_A_TYPE], HTAtom_name(HTInternalLink)) && !LYIsUIPage3(me->node_anchor->address, UIP_LIST_PAGE, 0) && !LYIsUIPage3(me->node_anchor->address, UIP_ADDRLIST_PAGE, 0) && !isLYNXIMGMAP(me->node_anchor->address)) { /* Some kind of spoof? * Found TYPE="internal link" but not in a valid context * where we have written it. - kw */ CTRACE((tfp, "HTML: Found invalid HREF=\"%s\" TYPE=\"%s\"!\n", href, temp)); FREE(temp); } } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ href, /* Address */ (temp ? 
(HTLinkType *) HTAtom_for(temp) : INTERN_LT)); /* Type */ FREE(temp); FREE(id_string); if (me->CurrentA && present) { if (present[HTML_A_TITLE] && non_empty(value[HTML_A_TITLE])) { StrAllocCopy(title, value[HTML_A_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); if (*title == '\0') { FREE(title); } } if (present[HTML_A_ISMAP]) dest_ismap = TRUE; if (present[HTML_A_CHARSET] && non_empty(value[HTML_A_CHARSET])) { /* * Set up to load the anchor's chartrans structures * appropriately for the current display character set if it * can handle what's claimed. - FM */ StrAllocCopy(temp, value[HTML_A_CHARSET]); TRANSLATE_AND_UNESCAPE_TO_STD(&temp); dest_char_set = UCGetLYhndl_byMIME(temp); if (dest_char_set < 0) { dest_char_set = UCLYhndl_for_unrec; } } if (title != NULL || dest_ismap == TRUE || dest_char_set >= 0) { dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) ); } if (dest && title != NULL && HTAnchor_title(dest) == NULL) HTAnchor_setTitle(dest, title); if (dest && dest_ismap) dest->isISMAPScript = TRUE; /* Don't allow CHARSET attribute to change *this* document's charset assumption. - kw */ if (dest && dest != me->node_anchor && dest_char_set >= 0) { /* * Load the anchor's chartrans structures. This should be done * more intelligently when setting up the structured object, * but it gets the job done for now. - FM */ HTAnchor_setUCInfoStage(dest, dest_char_set, UCT_STAGE_MIME, UCT_SETBY_DEFAULT); HTAnchor_setUCInfoStage(dest, dest_char_set, UCT_STAGE_PARSER, UCT_SETBY_LINK); } FREE(temp); dest = NULL; FREE(title); } me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldA == TRUE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); #if defined(NOTUSED_FOTEMODS) /* * Close an HREF-less NAMED-ed now if we aren't making their content * bold, and let the check in HTML_end_element() deal with any dangling * end tag this creates. 
- FM */ if (href == NULL && me->inBoldA == FALSE) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } #else /*Close an HREF-less NAMED-ed now if force_empty_hrefless_a was requested - VH */ if (href == NULL && force_empty_hrefless_a) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } #endif FREE(href); break; case HTML_IMG: /* Images */ /* * If we're in an anchor, get the destination, and if it's a clickable * image for the current anchor, set our flags for faking a 0,0 * coordinate pair, which typically returns the image's default. - FM */ if (me->inA && me->CurrentA) { if ((dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) )) != NULL) { if (dest->isISMAPScript == TRUE) { dest_ismap = TRUE; CTRACE((tfp, "HTML: '%s' is an ISMAP script\n", dest->address)); } else if (present && present[HTML_IMG_ISMAP]) { dest_ismap = TRUE; dest->isISMAPScript = TRUE; CTRACE((tfp, "HTML: Designating '%s' as an ISMAP script\n", dest->address)); } } } intern_flag = FALSE; /* unless set below - kw */ /* * If there's a USEMAP, resolve it. - FM */ if (present && present[HTML_IMG_USEMAP] && non_empty(value[HTML_IMG_USEMAP])) { StrAllocCopy(map_href, value[HTML_IMG_USEMAP]); CHECK_FOR_INTERN(intern_flag, map_href); (void) LYLegitimizeHREF(me, &map_href, TRUE, TRUE); /* * If map_href ended up zero-length or otherwise doesn't have a * hash, it can't be valid, so ignore it. - FM */ if (findPoundSelector(map_href) == NULL) { FREE(map_href); } } /* * Handle a MAP reference if we have one at this point. - FM */ if (map_href) { /* * If the MAP reference doesn't yet begin with a scheme, check * whether a base tag is in effect. - FM */ /* * If the USEMAP value is a lone fragment and LYSeekFragMAPinCur is * set, we'll use the current document's URL for resolving. * Otherwise use the BASE. - kw */ Base = ((me->inBASE && !(*map_href == '#' && LYSeekFragMAPinCur == TRUE)) ? 
me->base_href : me->node_anchor->address); HTParseALL(&map_href, Base); /* * Prepend our client-side MAP access field. - FM */ StrAllocCopy(temp, STR_LYNXIMGMAP); StrAllocCat(temp, map_href); StrAllocCopy(map_href, temp); FREE(temp); } /* * Check whether we want to suppress the server-side ISMAP link if a * client-side MAP is present. - FM */ if (LYNoISMAPifUSEMAP && map_href && dest_ismap) { dest_ismap = FALSE; dest = NULL; } /* * Check for a TITLE attribute. - FM */ if (present && present[HTML_IMG_TITLE] && non_empty(value[HTML_IMG_TITLE])) { StrAllocCopy(title, value[HTML_IMG_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); if (*title == '\0') { FREE(title); } } /* * If there's an ALT string, use it, unless the ALT string is * zero-length or just spaces and we are making all SRCs links or have * a USEMAP link. - FM */ if (((present) && (present[HTML_IMG_ALT] && value[HTML_IMG_ALT])) && (!clickable_images || ((clickable_images || map_href) && *value[HTML_IMG_ALT] != '\0'))) { StrAllocCopy(alt_string, value[HTML_IMG_ALT]); TRANSLATE_AND_UNESCAPE_ENTITIES(&alt_string, me->UsePlainSpace, me->HiddenValue); /* * If it's all spaces and we are making SRC or USEMAP links, treat * it as zero-length. - FM */ if (clickable_images || map_href) { LYTrimHead(alt_string); LYTrimTail(alt_string); if (*alt_string == '\0') { if (map_href) { StrAllocCopy(alt_string, (title ? title : (temp = MakeNewMapValue(value, "USEMAP")))); FREE(temp); } else if (dest_ismap) { StrAllocCopy(alt_string, (title ? title : (temp = MakeNewMapValue(value, "ISMAP")))); FREE(temp); } else if (me->inA == TRUE && dest) { StrAllocCopy(alt_string, (title ? title : VERBOSE_IMG(value, HTML_IMG_SRC, "[LINK]"))); } else { StrAllocCopy(alt_string, (title ? title : ((present && present[HTML_IMG_ISOBJECT]) ? "(OBJECT)" : VERBOSE_IMG(value, HTML_IMG_SRC, "[INLINE]")))); } } } } else if (map_href) { StrAllocCopy(alt_string, (title ? 
title : (temp = MakeNewMapValue(value, "USEMAP")))); FREE(temp); } else if ((dest_ismap == TRUE) || (me->inA && present && present[HTML_IMG_ISMAP])) { StrAllocCopy(alt_string, (title ? title : (temp = MakeNewMapValue(value, "ISMAP")))); FREE(temp); } else if (me->inA == TRUE && dest) { StrAllocCopy(alt_string, (title ? title : VERBOSE_IMG(value, HTML_IMG_SRC, "[LINK]"))); } else { if (pseudo_inline_alts || clickable_images) StrAllocCopy(alt_string, (title ? title : ((present && present[HTML_IMG_ISOBJECT]) ? "(OBJECT)" : VERBOSE_IMG(value, HTML_IMG_SRC, "[INLINE]")))); else StrAllocCopy(alt_string, NonNull(title)); } if (*alt_string == '\0' && map_href) { StrAllocCopy(alt_string, (temp = MakeNewMapValue(value, "USEMAP"))); FREE(temp); } CTRACE((tfp, "HTML IMG: USEMAP=%d ISMAP=%d ANCHOR=%d PARA=%d\n", map_href ? 1 : 0, (dest_ismap == TRUE) ? 1 : 0, me->inA, me->inP)); /* * Check for an ID attribute. - FM */ if (present && present[HTML_IMG_ID] && non_empty(value[HTML_IMG_ID])) { StrAllocCopy(id_string, value[HTML_IMG_ID]); TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); if (*id_string == '\0') { FREE(id_string); } } /* * Create links to the SRC for all images, if desired. - FM */ if (clickable_images && present && present[HTML_IMG_SRC] && non_empty(value[HTML_IMG_SRC])) { StrAllocCopy(href, value[HTML_IMG_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); /* * If it's an ISMAP and/or USEMAP, or graphic for an anchor, end * that anchor and start one for the SRC. - FM */ if (me->inA) { /* * If we have a USEMAP, end this anchor and start a new one for * the client-side MAP. 
- FM */ if (map_href) { if (dest_ismap) { HTML_put_character(me, ' '); me->in_word = NO; HTML_put_string(me, (temp = MakeNewMapValue(value, "ISMAP"))); FREE(temp); } else if (dest) { HTML_put_character(me, ' '); me->in_word = NO; HTML_put_string(me, "[LINK]"); } if (me->inBoldA == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldA = FALSE; HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; if (dest_ismap || dest) HTML_put_character(me, '-'); if (id_string) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); } } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ map_href, /* Addresss */ INTERN_LT); /* Type */ if (me->CurrentA && title) { if ((dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) )) != NULL) { if (!HTAnchor_title(dest)) HTAnchor_setTitle(dest, title); } } me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldA == FALSE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_START_CHAR); } me->inBoldA = TRUE; } else { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } HTML_put_string(me, alt_string); if (me->inBoldA == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldA = FALSE; HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; HTML_put_character(me, '-'); FREE(newtitle); StrAllocCopy(alt_string, ((present && present[HTML_IMG_ISOBJECT]) ? ((map_href || dest_ismap) ? 
"(IMAGE)" : "(OBJECT)") : VERBOSE_IMG(value, HTML_IMG_SRC, "[IMAGE]"))); if (id_string && !map_href) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); } } } else if (map_href) { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; if (id_string) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); } } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ map_href, /* Addresss */ INTERN_LT); /* Type */ if (me->CurrentA && title) { if ((dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) )) != NULL) { if (!HTAnchor_title(dest)) HTAnchor_setTitle(dest, title); } } me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldA == FALSE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); me->inBoldA = TRUE; HTML_put_string(me, alt_string); if (me->inBoldA == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldA = FALSE; HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; HTML_put_character(me, '-'); FREE(newtitle); StrAllocCopy(alt_string, ((present && present[HTML_IMG_ISOBJECT]) ? "(IMAGE)" : VERBOSE_IMG(value, HTML_IMG_SRC, "[IMAGE]"))); } else { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; if (id_string) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); } } } /* * Create the link to the SRC. 
- FM */ me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ FREE(href); me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, alt_string); if (!me->inA) { if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } else { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; me->inBoldA = TRUE; } } else if (map_href) { if (me->inA) { /* * We're in an anchor and have a USEMAP, so end the anchor and * start a new one for the client-side MAP. - FM */ if (dest_ismap) { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; HTML_put_string(me, (temp = MakeNewMapValue(value, "ISMAP"))); FREE(temp); } else if (dest) { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; HTML_put_string(me, "[LINK]"); } if (me->inBoldA == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldA = FALSE; HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; if (dest_ismap || dest) { HTML_put_character(me, '-'); } } else { HTML_put_character(me, ' '); me->in_word = NO; } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ map_href, /* Addresss */ INTERN_LT); /* Type */ if (me->CurrentA && title) { if ((dest = HTAnchor_parent(HTAnchor_followLink(me->CurrentA) )) != NULL) { if (!HTAnchor_title(dest)) HTAnchor_setTitle(dest, title); } } me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldA == FALSE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_START_CHAR); } me->inBoldA = TRUE; HTML_put_string(me, alt_string); 
if (!me->inA) { if (me->inBoldA == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldA = FALSE; HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; } } else { /* * Just put in the ALT or pseudo-ALT string for the current anchor * or inline, with an ID link if indicated. - FM */ HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; if (id_string) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ (HTLinkType *) 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); } } HTML_put_string(me, alt_string); HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } FREE(map_href); FREE(alt_string); FREE(id_string); FREE(title); FREE(newtitle); dest = NULL; break; case HTML_MAP: /* * Load id_string if we have a NAME or ID. - FM */ if (present && present[HTML_MAP_NAME] && non_empty(value[HTML_MAP_NAME])) { StrAllocCopy(id_string, value[HTML_MAP_NAME]); } else if (present && present[HTML_MAP_ID] && non_empty(value[HTML_MAP_ID])) { StrAllocCopy(id_string, value[HTML_MAP_ID]); } if (id_string) { TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); if (*id_string == '\0') { FREE(id_string); } } /* * Generate a target anchor in this place in the containing document. * MAP can now contain block markup, if it doesn't contain any AREAs * (or A anchors with COORDS converted to AREAs) the current location * can be used as a fallback for following a USEMAP link. - kw */ if (!LYMapsOnly) LYHandleID(me, id_string); /* * Load map_address. - FM */ if (id_string) { /* * The MAP must be in the current stream, even if it had a BASE * tag, so we'll use its address here, but still use the BASE, if * present, when resolving the AREA elements in it's content, * unless the AREA's HREF is a lone fragment and * LYSeekFragAREAinCur is set. 
- FM && KW */ StrAllocCopy(me->map_address, me->node_anchor->address); if ((cp = StrChr(me->map_address, '#')) != NULL) *cp = '\0'; StrAllocCat(me->map_address, "#"); StrAllocCat(me->map_address, id_string); FREE(id_string); if (present && present[HTML_MAP_TITLE] && non_empty(value[HTML_MAP_TITLE])) { StrAllocCopy(title, value[HTML_MAP_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); if (*title == '\0') { FREE(title); } } LYAddImageMap(me->map_address, title, me->node_anchor); FREE(title); } break; case HTML_AREA: if (me->map_address && present && present[HTML_AREA_HREF] && non_empty(value[HTML_AREA_HREF])) { /* * Resolve the HREF. - FM */ StrAllocCopy(href, value[HTML_AREA_HREF]); CHECK_FOR_INTERN(intern_flag, href); (void) LYLegitimizeHREF(me, &href, TRUE, TRUE); /* * Check whether a BASE tag is in effect, and use it for resolving, * even though we used this stream's address for locating the MAP * itself, unless the HREF is a lone fragment and * LYSeekFragAREAinCur is set. - FM */ Base = (((me->inBASE && *href != '\0') && !(*href == '#' && LYSeekFragAREAinCur == TRUE)) ? me->base_href : me->node_anchor->address); HTParseALL(&href, Base); /* * Check for an ALT. - FM */ if (present[HTML_AREA_ALT] && non_empty(value[HTML_AREA_ALT])) { StrAllocCopy(alt_string, value[HTML_AREA_ALT]); } else if (present[HTML_AREA_TITLE] && non_empty(value[HTML_AREA_TITLE])) { /* * Use the TITLE as an ALT. - FM */ StrAllocCopy(alt_string, value[HTML_AREA_TITLE]); } if (alt_string != NULL) { TRANSLATE_AND_UNESCAPE_ENTITIES(&alt_string, me->UsePlainSpace, me->HiddenValue); /* * Make sure it's not just space(s). - FM */ LYTrimHead(alt_string); LYTrimTail(alt_string); if (*alt_string == '\0') { StrAllocCopy(alt_string, href); } } else { /* * Use the HREF as an ALT. 
- FM */ StrAllocCopy(alt_string, href); } LYAddMapElement(me->map_address, href, alt_string, me->node_anchor, intern_flag); FREE(href); FREE(alt_string); } break; case HTML_PARAM: /* * We may need to look at this someday to deal with MAPs, OBJECTs or * APPLETs optimally, but just ignore it for now. - FM */ break; case HTML_BODYTEXT: CHECK_ID(HTML_BODYTEXT_ID); /* * We may need to look at this someday to deal with OBJECTs optimally, * but just ignore it for now. - FM */ break; case HTML_TEXTFLOW: CHECK_ID(HTML_BODYTEXT_ID); /* * We may need to look at this someday to deal with APPLETs optimally, * but just ignore it for now. - FM */ break; case HTML_FIG: if (present) LYHandleFIG(me, present, value, present[HTML_FIG_ISOBJECT], present[HTML_FIG_IMAGEMAP], present[HTML_FIG_ID] ? value[HTML_FIG_ID] : NULL, present[HTML_FIG_SRC] ? value[HTML_FIG_SRC] : NULL, YES, TRUE, &intern_flag); else LYHandleFIG(me, NULL, NULL, 0, 0, NULL, NULL, YES, TRUE, &intern_flag); break; case HTML_OBJECT: if (!me->object_started) { /* * This is an outer OBJECT start tag, i.e., not a nested OBJECT, so * save its relevant attributes. 
- FM */ if (present) { if (present[HTML_OBJECT_DECLARE]) me->object_declare = TRUE; if (present[HTML_OBJECT_SHAPES]) me->object_shapes = TRUE; if (present[HTML_OBJECT_ISMAP]) me->object_ismap = TRUE; if (present[HTML_OBJECT_USEMAP] && non_empty(value[HTML_OBJECT_USEMAP])) { StrAllocCopy(me->object_usemap, value[HTML_OBJECT_USEMAP]); TRANSLATE_AND_UNESCAPE_TO_STD(&me->object_usemap); if (*me->object_usemap == '\0') { FREE(me->object_usemap); } } if (present[HTML_OBJECT_ID] && non_empty(value[HTML_OBJECT_ID])) { StrAllocCopy(me->object_id, value[HTML_OBJECT_ID]); TRANSLATE_AND_UNESCAPE_TO_STD(&me->object_id); if (*me->object_id == '\0') { FREE(me->object_id); } } if (present[HTML_OBJECT_TITLE] && non_empty(value[HTML_OBJECT_TITLE])) { StrAllocCopy(me->object_title, value[HTML_OBJECT_TITLE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&me->object_title, TRUE, FALSE); LYTrimHead(me->object_title); LYTrimTail(me->object_title); if (*me->object_title == '\0') { FREE(me->object_title); } } if (present[HTML_OBJECT_DATA] && non_empty(value[HTML_OBJECT_DATA])) { StrAllocCopy(me->object_data, value[HTML_OBJECT_DATA]); TRANSLATE_AND_UNESCAPE_TO_STD(&me->object_data); if (*me->object_data == '\0') { FREE(me->object_data); } } if (present[HTML_OBJECT_TYPE] && non_empty(value[HTML_OBJECT_TYPE])) { StrAllocCopy(me->object_type, value[HTML_OBJECT_TYPE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&me->object_type, TRUE, FALSE); LYTrimHead(me->object_type); LYTrimTail(me->object_type); if (*me->object_type == '\0') { FREE(me->object_type); } } if (present[HTML_OBJECT_CLASSID] && non_empty(value[HTML_OBJECT_CLASSID])) { StrAllocCopy(me->object_classid, value[HTML_OBJECT_CLASSID]); TRANSLATE_AND_UNESCAPE_ENTITIES(&me->object_classid, TRUE, FALSE); LYTrimHead(me->object_classid); LYTrimTail(me->object_classid); if (*me->object_classid == '\0') { FREE(me->object_classid); } } if (present[HTML_OBJECT_CODEBASE] && non_empty(value[HTML_OBJECT_CODEBASE])) { StrAllocCopy(me->object_codebase, 
value[HTML_OBJECT_CODEBASE]); TRANSLATE_AND_UNESCAPE_TO_STD(&me->object_codebase); if (*me->object_codebase == '\0') { FREE(me->object_codebase); } } if (present[HTML_OBJECT_CODETYPE] && non_empty(value[HTML_OBJECT_CODETYPE])) { StrAllocCopy(me->object_codetype, value[HTML_OBJECT_CODETYPE]); TRANSLATE_AND_UNESCAPE_ENTITIES(&me->object_codetype, TRUE, FALSE); LYTrimHead(me->object_codetype); LYTrimTail(me->object_codetype); if (*me->object_codetype == '\0') { FREE(me->object_codetype); } } if (present[HTML_OBJECT_NAME] && non_empty(value[HTML_OBJECT_NAME])) { StrAllocCopy(me->object_name, value[HTML_OBJECT_NAME]); TRANSLATE_AND_UNESCAPE_ENTITIES(&me->object_name, TRUE, FALSE); LYTrimHead(me->object_name); LYTrimTail(me->object_name); if (*me->object_name == '\0') { FREE(me->object_name); } } } /* * If we can determine now that we are not going to do anything * special to the OBJECT element's SGML contents, like skipping it * completely or collecting it up in order to add something after * it, then generate any output that should be emitted in the place * of the OBJECT start tag NOW, then don't initialize special * handling but return, letting our SGML parser know that further * content is to be parsed normally not literally. We could defer * this until we have collected the contents and then recycle the * contents (as was previously always done), but that has a higher * chance of completely losing content in case of nesting errors in * the input, incomplete transmissions, etc. - kw */ if ((!present || (me->object_declare == FALSE && me->object_name == NULL && me->object_shapes == FALSE && me->object_usemap == NULL))) { if (!LYMapsOnly) { if (!clickable_images || me->object_data == NULL || !(me->object_data != NULL && me->object_classid == NULL && me->object_codebase == NULL && me->object_codetype == NULL)) FREE(me->object_data); if (me->object_data) { HTStartAnchor5(me, (me->object_id ? 
value[HTML_OBJECT_ID] : NULL), value[HTML_OBJECT_DATA], value[HTML_OBJECT_TYPE], tag_charset); if ((me->object_type != NULL) && !strncasecomp(me->object_type, "image/", 6)) HTML_put_string(me, "(IMAGE)"); else HTML_put_string(me, "(OBJECT)"); HTML_end_element(me, HTML_A, NULL); } else if (me->object_id) LYHandleID(me, me->object_id); } clear_objectdata(me); /* * We do NOT want the HTML_put_* functions that are going to be * called for the OBJECT's character content to add to the * chunk, so we don't push on the stack. Instead we keep a * counter for open OBJECT tags that are treated this way, so * HTML_end_element can skip handling the corresponding end tag * that is going to arrive unexpectedly as far as our stack is * concerned. */ status = HT_PARSER_OTHER_CONTENT; if (me->sp[0].tag_number == HTML_FIG && me->objects_figged_open > 0) { ElementNumber = (HTMLElement) HTML_OBJECT_M; } else { me->objects_mixed_open++; SET_SKIP_STACK(HTML_OBJECT); } } else if (me->object_declare == FALSE && me->object_name == NULL && me->object_shapes == TRUE) { LYHandleFIG(me, present, value, 1, 1 || me->object_ismap, me->object_id, ((me->object_data && !me->object_classid) ? value[HTML_OBJECT_DATA] : NULL), NO, TRUE, &intern_flag); clear_objectdata(me); status = HT_PARSER_OTHER_CONTENT; me->objects_figged_open++; ElementNumber = HTML_FIG; } else { /* * Set flag that we are accumulating OBJECT content. 
- FM */ me->object_started = TRUE; } } break; case HTML_OVERLAY: if (clickable_images && me->inFIG && present && present[HTML_OVERLAY_SRC] && non_empty(value[HTML_OVERLAY_SRC])) { StrAllocCopy(href, value[HTML_OVERLAY_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); if (*href) { if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ HTML_put_character(me, ' '); HText_appendCharacter(me->text, '+'); me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, "[OVERLAY]"); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); HTML_put_character(me, ' '); me->in_word = NO; } FREE(href); } break; case HTML_APPLET: me->inAPPLET = TRUE; me->inAPPLETwithP = FALSE; HTML_put_character(me, ' '); /* space char may be ignored */ /* * Load id_string if we have an ID or NAME. - FM */ if (present && present[HTML_APPLET_ID] && non_empty(value[HTML_APPLET_ID])) { StrAllocCopy(id_string, value[HTML_APPLET_ID]); } else if (present && present[HTML_APPLET_NAME] && non_empty(value[HTML_APPLET_NAME])) { StrAllocCopy(id_string, value[HTML_APPLET_NAME]); } if (id_string) { TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); LYHandleID(me, id_string); FREE(id_string); } me->in_word = NO; /* * If there's an ALT string, use it, unless the ALT string is * zero-length and we are making all sources links. 
- FM */ if (present && present[HTML_APPLET_ALT] && value[HTML_APPLET_ALT] && (!clickable_images || (clickable_images && *value[HTML_APPLET_ALT] != '\0'))) { StrAllocCopy(alt_string, value[HTML_APPLET_ALT]); TRANSLATE_AND_UNESCAPE_ENTITIES(&alt_string, me->UsePlainSpace, me->HiddenValue); /* * If it's all spaces and we are making sources links, treat it as * zero-length. - FM */ if (clickable_images) { LYTrimHead(alt_string); LYTrimTail(alt_string); if (*alt_string == '\0') { StrAllocCopy(alt_string, "[APPLET]"); } } } else { if (clickable_images) StrAllocCopy(alt_string, "[APPLET]"); else StrAllocCopy(alt_string, ""); } /* * If we're making all sources links, get the source. - FM */ if (clickable_images && present && present[HTML_APPLET_CODE] && non_empty(value[HTML_APPLET_CODE])) { char *base = NULL; Base = (me->inBASE) ? me->base_href : me->node_anchor->address; /* * Check for a CODEBASE attribute. - FM */ if (present[HTML_APPLET_CODEBASE] && non_empty(value[HTML_APPLET_CODEBASE])) { StrAllocCopy(base, value[HTML_APPLET_CODEBASE]); LYRemoveBlanks(base); TRANSLATE_AND_UNESCAPE_TO_STD(&base); /* * Force it to be a directory. - FM */ if (*base == '\0') StrAllocCopy(base, "/"); LYAddHtmlSep(&base); LYLegitimizeHREF(me, &base, TRUE, FALSE); HTParseALL(&base, Base); } StrAllocCopy(href, value[HTML_APPLET_CODE]); LYLegitimizeHREF(me, &href, TRUE, FALSE); HTParseALL(&href, (base ? 
base : Base)); FREE(base); if (*href) { if (me->inA) { if (me->inBoldA == TRUE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); HTML_put_character(me, '-'); } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, alt_string); if (me->inA == FALSE) { if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; } HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } FREE(href); } else if (*alt_string) { /* * Just put up the ALT string, if non-zero. - FM */ HTML_put_string(me, alt_string); HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } FREE(alt_string); FREE(id_string); break; case HTML_BGSOUND: /* * If we're making all sources links, get the source. 
- FM */ if (clickable_images && present && present[HTML_BGSOUND_SRC] && non_empty(value[HTML_BGSOUND_SRC])) { StrAllocCopy(href, value[HTML_BGSOUND_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); if (*href == '\0') { FREE(href); break; } if (me->inA) { if (me->inBoldA == TRUE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); HTML_put_character(me, '-'); } else { HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, "[BGSOUND]"); if (me->inA == FALSE) { if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; } HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; FREE(href); } break; case HTML_EMBED: if (pseudo_inline_alts || clickable_images) HTML_put_character(me, ' '); /* space char may be ignored */ /* * Load id_string if we have an ID or NAME. - FM */ if (present && present[HTML_EMBED_ID] && non_empty(value[HTML_EMBED_ID])) { StrAllocCopy(id_string, value[HTML_EMBED_ID]); } else if (present && present[HTML_EMBED_NAME] && non_empty(value[HTML_EMBED_NAME])) { StrAllocCopy(id_string, value[HTML_EMBED_NAME]); } if (id_string) { TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); LYHandleID(me, id_string); FREE(id_string); } if (pseudo_inline_alts || clickable_images) me->in_word = NO; /* * If there's an ALT string, use it, unless the ALT string is * zero-length and we are making all sources links. 
- FM */ if (present && present[HTML_EMBED_ALT] && value[HTML_EMBED_ALT] && (!clickable_images || (clickable_images && *value[HTML_EMBED_ALT] != '\0'))) { StrAllocCopy(alt_string, value[HTML_EMBED_ALT]); TRANSLATE_AND_UNESCAPE_ENTITIES(&alt_string, me->UsePlainSpace, me->HiddenValue); /* * If it's all spaces and we are making sources links, treat it as * zero-length. - FM */ if (clickable_images) { LYTrimHead(alt_string); LYTrimTail(alt_string); if (*alt_string == '\0') { StrAllocCopy(alt_string, "[EMBED]"); } } } else { if (pseudo_inline_alts || clickable_images) StrAllocCopy(alt_string, "[EMBED]"); else StrAllocCopy(alt_string, ""); } /* * If we're making all sources links, get the source. - FM */ if (clickable_images && present && present[HTML_EMBED_SRC] && non_empty(value[HTML_EMBED_SRC])) { StrAllocCopy(href, value[HTML_EMBED_SRC]); LYLegitimizeHREF(me, &href, TRUE, TRUE); if (*href) { if (me->inA) { if (me->inBoldA == TRUE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); HTML_put_character(me, '-'); } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ me->CurrentANum = HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, alt_string); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); if (me->inA == FALSE) { if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; } HTML_put_character(me, ' '); me->in_word = NO; } FREE(href); } else if (*alt_string) { /* * Just put up the ALT string, if non-zero. 
- FM */ HTML_put_string(me, alt_string); HTML_put_character(me, ' '); /* space char may be ignored */ me->in_word = NO; } FREE(alt_string); FREE(id_string); break; case HTML_CREDIT: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); me->inCREDIT = TRUE; CHECK_ID(HTML_GEN_ID); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "CREDIT:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); CAN_JUSTIFY_START; if (me->inFIG) /* * Assume all text in the FIG container is intended to be * paragraphed. - FM */ me->inFIGwithP = TRUE; if (me->inAPPLET) /* * Assume all text in the APPLET container is intended to be * paragraphed. - FM */ me->inAPPLETwithP = TRUE; me->inLABEL = TRUE; me->in_word = NO; me->inP = FALSE; break; case HTML_CAPTION: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); me->inCAPTION = TRUE; CHECK_ID(HTML_CAPTION_ID); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "CAPTION:"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); CAN_JUSTIFY_START; if (me->inFIG) /* * Assume all text in the FIG container is intended to be * paragraphed. - FM */ me->inFIGwithP = TRUE; if (me->inAPPLET) /* * Assume all text in the APPLET container is intended to be * paragraphed. - FM */ me->inAPPLETwithP = TRUE; me->inLABEL = TRUE; me->in_word = NO; me->inP = FALSE; break; case HTML_FORM: { char *action = NULL; char *method = NULL; char *enctype = NULL; const char *accept_cs = NULL; HTChildAnchor *source; HTAnchor *link_dest; /* * FORM may have been declared SGML_EMPTY in HTMLDTD.c, and * SGML_character() in SGML.c may check for a FORM end tag to call * HTML_end_element() directly (with a check in that to bypass * decrementing of the HTML parser's stack), so if we have an open * FORM, close that one now. 
- FM */ if (me->inFORM) { CTRACE((tfp, "HTML: Missing FORM end tag. Faking it!\n")); SET_SKIP_STACK(HTML_FORM); HTML_end_element(me, HTML_FORM, include); } /* * Set to know we are in a new form. */ me->inFORM = TRUE; EMIT_IFDEF_USE_JUSTIFY_ELTS(form_in_htext = TRUE); if (present && present[HTML_FORM_ACCEPT_CHARSET]) { accept_cs = (value[HTML_FORM_ACCEPT_CHARSET] ? value[HTML_FORM_ACCEPT_CHARSET] : "UNKNOWN"); } Base = (me->inBASE) ? me->base_href : me->node_anchor->address; if (present && present[HTML_FORM_ACTION] && value[HTML_FORM_ACTION]) { StrAllocCopy(action, value[HTML_FORM_ACTION]); LYLegitimizeHREF(me, &action, TRUE, TRUE); /* * Check whether a base tag is in effect. Note that actions * always are resolved w.r.t. to the base, even if the action * is empty. - FM */ HTParseALL(&action, Base); } else { StrAllocCopy(action, Base); } source = HTAnchor_findChildAndLink(me->node_anchor, NULL, action, (HTLinkType *) 0); if ((link_dest = HTAnchor_followLink(source)) != NULL) { /* * Memory leak fixed. 05-28-94 Lynx 2-3-1 Garrett Arch Blythe */ char *cp_freeme = HTAnchor_address(link_dest); if (cp_freeme != NULL) { StrAllocCopy(action, cp_freeme); FREE(cp_freeme); } else { StrAllocCopy(action, ""); } } if (present && present[HTML_FORM_METHOD]) StrAllocCopy(method, (value[HTML_FORM_METHOD] ? value[HTML_FORM_METHOD] : "GET")); if (present && present[HTML_FORM_ENCTYPE] && non_empty(value[HTML_FORM_ENCTYPE])) { StrAllocCopy(enctype, value[HTML_FORM_ENCTYPE]); LYLowerCase(enctype); } if (present) { /* * Check for a TITLE attribute, and if none is present, check * for a SUBJECT attribute as a synonym. 
- FM */ if (present[HTML_FORM_TITLE] && non_empty(value[HTML_FORM_TITLE])) { StrAllocCopy(title, value[HTML_FORM_TITLE]); } else if (present[HTML_FORM_SUBJECT] && non_empty(value[HTML_FORM_SUBJECT])) { StrAllocCopy(title, value[HTML_FORM_SUBJECT]); } if (non_empty(title)) { TRANSLATE_AND_UNESCAPE_ENTITIES(&title, TRUE, FALSE); LYTrimHead(title); LYTrimTail(title); if (*title == '\0') { FREE(title); } } } HText_beginForm(action, method, enctype, title, accept_cs); FREE(action); FREE(method); FREE(enctype); FREE(title); } CHECK_ID(HTML_FORM_ID); break; case HTML_FIELDSET: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); CHECK_ID(HTML_GEN_ID); break; case HTML_LEGEND: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); CHECK_ID(HTML_CAPTION_ID); break; case HTML_LABEL: CHECK_ID(HTML_LABEL_ID); break; case HTML_KEYGEN: CHECK_ID(HTML_KEYGEN_ID); break; case HTML_BUTTON: { InputFieldData I; int chars; /* init */ memset(&I, 0, sizeof(I)); I.name_cs = ATTR_CS_IN; I.value_cs = ATTR_CS_IN; UPDATE_STYLE; if (present && present[HTML_BUTTON_TYPE] && value[HTML_BUTTON_TYPE]) { if (!strcasecomp(value[HTML_BUTTON_TYPE], "submit") || !strcasecomp(value[HTML_BUTTON_TYPE], "reset")) { /* * It's a button for submitting or resetting a form. - FM */ I.type = value[HTML_BUTTON_TYPE]; } else { /* * Ugh, it's a button for a script. - FM */ I.type = value[HTML_BUTTON_TYPE]; CTRACE((tfp, "found button for a script\n")); } } else { /* default, if no type given, is a submit button */ I.type = "submit"; } /* * Before any input field, add a collapsible space if we're not in * a PRE block, to promote a wrap there for any long values that * would extend past the right margin from our current position in * the line. If we are in a PRE block, start a new line if the * last line already is within 6 characters of the wrap point for * PRE blocks. 
- FM */ if (me->sp[0].tag_number != HTML_PRE && !me->inPRE && me->sp->style->freeFormat) { HTML_put_character(me, ' '); me->in_word = NO; } else if (HText_LastLineSize(me->text, FALSE) > (LYcolLimit - 6)) { HTML_put_character(me, '\n'); me->in_word = NO; } HTML_put_character(me, '('); if (!(present && present[HTML_BUTTON_NAME] && value[HTML_BUTTON_NAME])) { I.name = ""; } else if (StrChr(value[HTML_BUTTON_NAME], '&') == NULL) { I.name = value[HTML_BUTTON_NAME]; } else { StrAllocCopy(I_name, value[HTML_BUTTON_NAME]); UNESCAPE_FIELDNAME_TO_STD(&I_name); I.name = I_name; } if (present && present[HTML_BUTTON_VALUE] && non_empty(value[HTML_BUTTON_VALUE])) { /* * Convert any HTML entities or decimal escaping. - FM */ StrAllocCopy(I_value, value[HTML_BUTTON_VALUE]); me->UsePlainSpace = TRUE; TRANSLATE_AND_UNESCAPE_ENTITIES(&I_value, TRUE, me->HiddenValue); me->UsePlainSpace = FALSE; I.value = I_value; /* * Convert any newlines or tabs to spaces, and trim any lead or * trailing spaces. - FM */ LYReduceBlanks(I.value); } else if (!strcasecomp(I.type, "button")) { if (non_empty(I.name)) { StrAllocCopy(I.value, I.name); } else { StrAllocCopy(I.value, "BUTTON"); } } else if (I.value == 0) { StrAllocCopy(I.value, "BUTTON"); } if (present && present[HTML_BUTTON_READONLY]) I.readonly = YES; if (present && present[HTML_BUTTON_DISABLED]) I.disabled = YES; if (present && present[HTML_BUTTON_CLASS] && /* Not yet used. */ non_empty(value[HTML_BUTTON_CLASS])) I.iclass = value[HTML_BUTTON_CLASS]; if (present && present[HTML_BUTTON_ID] && non_empty(value[HTML_BUTTON_ID])) { I.id = value[HTML_BUTTON_ID]; CHECK_ID(HTML_BUTTON_ID); } if (present && present[HTML_BUTTON_LANG] && /* Not yet used. 
*/ non_empty(value[HTML_BUTTON_LANG])) I.lang = value[HTML_BUTTON_LANG]; chars = HText_beginInput(me->text, me->inUnderline, &I); /* * Submit and reset buttons have values which don't change, so * HText_beginInput() sets I.value to the string which should be * displayed, and we'll enter that instead of underscore * placeholders into the HText structure to see it instead of * underscores when dumping or printing. We also won't worry about * a wrap in PRE blocks, because the line editor never is invoked * for submit or reset buttons. - LE & FM */ if (me->sp[0].tag_number == HTML_PRE || !me->sp->style->freeFormat) { /* * We have a submit or reset button in a PRE block, so output * the entire value from the markup. If it extends to the * right margin, it will wrap there, and only the portion * before that wrap will be hightlighted on screen display * (Yuk!) but we may as well show the rest of the full value on * the next or more lines. - FM */ while (I.value[i]) HTML_put_character(me, I.value[i++]); } else { /* * The submit or reset button is not in a PRE block. Note that * if a wrap occurs before outputting the entire value, the * wrapped portion will not be highlighted or clearly indicated * as part of the link for submission or reset (Yuk!). We'll * replace any spaces in the submit or reset button value with * nbsp, to promote a wrap at the space we ensured would be * present before the start of the string, as when we use all * underscores instead of the INPUT's actual value, but we * could still get a wrap at the right margin, instead, if the * value is greater than a line width for the current style. * Also, if chars somehow ended up longer than the length of * the actual value (shouldn't have), we'll continue padding * with nbsp up to the length of chars. - FM */ for (i = 0; I.value[i]; i++) { HTML_put_character(me, (char) ((I.value[i] == ' ') ? 
HT_NON_BREAK_SPACE : I.value[i])); } while (i++ < chars) { HTML_put_character(me, HT_NON_BREAK_SPACE); } } HTML_put_character(me, ')'); if (me->sp[0].tag_number != HTML_PRE && me->sp->style->freeFormat) { HTML_put_character(me, ' '); me->in_word = NO; } FREE(I_value); FREE(I_name); } break; case HTML_INPUT: { InputFieldData I; int chars; BOOL UseALTasVALUE = FALSE; BOOL HaveSRClink = FALSE; char *ImageSrc = NULL; BOOL IsSubmitOrReset = FALSE; HTkcode kcode = NOKANJI; HTkcode specified_kcode = NOKANJI; /* init */ memset(&I, 0, sizeof(I)); I.name_cs = ATTR_CS_IN; I.value_cs = ATTR_CS_IN; UPDATE_STYLE; /* * Before any input field, add a collapsible space if we're not in * a PRE block, to promote a wrap there for any long values that * would extend past the right margin from our current position in * the line. If we are in a PRE block, start a new line if the * last line already is within 6 characters of the wrap point for * PRE blocks. - FM */ if (me->sp[0].tag_number != HTML_PRE && !me->inPRE && me->sp->style->freeFormat) { HTML_put_character(me, ' '); me->in_word = NO; } else if (HText_LastLineSize(me->text, FALSE) > (LYcolLimit - 6)) { HTML_put_character(me, '\n'); me->in_word = NO; } /* * Get the TYPE and make sure we can handle it. - FM */ if (present && present[HTML_INPUT_TYPE] && non_empty(value[HTML_INPUT_TYPE])) { const char *not_impl = NULL; char *usingval = NULL; I.type = value[HTML_INPUT_TYPE]; if (!strcasecomp(I.type, "range")) { if (present[HTML_INPUT_MIN] && non_empty(value[HTML_INPUT_MIN])) I.min = value[HTML_INPUT_MIN]; if (present[HTML_INPUT_MAX] && non_empty(value[HTML_INPUT_MAX])) I.max = value[HTML_INPUT_MAX]; /* * Not yet implemented. 
*/ #ifdef NOTDEFINED not_impl = "[RANGE Input]"; if (me->inFORM) HText_DisableCurrentForm(); #endif /* NOTDEFINED */ CTRACE((tfp, "HTML: Ignoring TYPE=\"range\"\n")); break; } else if (!strcasecomp(I.type, "file")) { if (present[HTML_INPUT_ACCEPT] && non_empty(value[HTML_INPUT_ACCEPT])) I.accept = value[HTML_INPUT_ACCEPT]; #ifndef USE_FILE_UPLOAD not_impl = "[FILE Input]"; CTRACE((tfp, "Attempting to fake as: %s\n", I.type)); #ifdef NOTDEFINED if (me->inFORM) HText_DisableCurrentForm(); #endif /* NOTDEFINED */ CTRACE((tfp, "HTML: Ignoring TYPE=\"file\"\n")); #endif /* USE_FILE_UPLOAD */ } else if (!strcasecomp(I.type, "button")) { /* * Ugh, a button for a script. */ not_impl = "[BUTTON Input]"; } if (not_impl != NULL) { if (me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); } HTML_put_string(me, not_impl); if (usingval != NULL) { HTML_put_string(me, usingval); FREE(usingval); } else { HTML_put_string(me, " (not implemented)"); } if (me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); } } } CTRACE((tfp, "Ok, we're trying type=[%s]\n", NONNULL(I.type))); /* * Check for an unclosed TEXTAREA. */ if (me->inTEXTAREA) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Missing TEXTAREA end tag.\n"); } } /* * Check for an unclosed SELECT, try to close it if found. */ if (me->inSELECT) { CTRACE((tfp, "HTML: Missing SELECT end tag, faking it...\n")); if (me->sp->tag_number != HTML_SELECT) { SET_SKIP_STACK(HTML_SELECT); } HTML_end_element(me, HTML_SELECT, include); } /* * Handle the INPUT as for a FORM. 
- FM */ if (!(present && present[HTML_INPUT_NAME] && non_empty(value[HTML_INPUT_NAME]))) { I.name = ""; } else if (StrChr(value[HTML_INPUT_NAME], '&') == NULL) { I.name = value[HTML_INPUT_NAME]; } else { StrAllocCopy(I_name, value[HTML_INPUT_NAME]); UNESCAPE_FIELDNAME_TO_STD(&I_name); I.name = I_name; } if ((present && present[HTML_INPUT_ALT] && non_empty(value[HTML_INPUT_ALT]) && I.type && !strcasecomp(I.type, "image")) && !(present && present[HTML_INPUT_VALUE] && non_empty(value[HTML_INPUT_VALUE]))) { /* * This is a TYPE="image" using an ALT rather than VALUE * attribute to indicate the link string for text clients or * GUIs with image loading off, so set the flag to use that as * if it were a VALUE attribute. - FM */ UseALTasVALUE = TRUE; } if (verbose_img && !clickable_images && present && present[HTML_INPUT_SRC] && non_empty(value[HTML_INPUT_SRC]) && I.type && !strcasecomp(I.type, "image")) { ImageSrc = MakeNewImageValue(value); } else if (clickable_images == TRUE && present && present[HTML_INPUT_SRC] && non_empty(value[HTML_INPUT_SRC]) && I.type && !strcasecomp(I.type, "image")) { StrAllocCopy(href, value[HTML_INPUT_SRC]); /* * We have a TYPE="image" with a non-zero-length SRC attribute * and want clickable images. Make the SRC's value a link if * it's still not zero-length legitimizing it. 
- FM */ LYLegitimizeHREF(me, &href, TRUE, TRUE); if (*href) { if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } me->CurrentA = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ NULL, /* Tag */ href, /* Addresss */ (HTLinkType *) 0); /* Type */ HText_beginAnchor(me->text, me->inUnderline, me->CurrentA); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_START_CHAR); HTML_put_string(me, VERBOSE_IMG(value, HTML_INPUT_SRC, "[IMAGE]")); FREE(newtitle); if (me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, 0); HTML_put_character(me, '-'); HaveSRClink = TRUE; } FREE(href); } CTRACE((tfp, "2.Ok, we're trying type=[%s] (present=%p)\n", NONNULL(I.type), (const void *) present)); /* text+file don't go in here */ if ((UseALTasVALUE == TRUE) || (present && present[HTML_INPUT_VALUE] && value[HTML_INPUT_VALUE] && (*value[HTML_INPUT_VALUE] || (I.type && (!strcasecomp(I.type, "checkbox") || !strcasecomp(I.type, "radio")))))) { /* * Convert any HTML entities or decimal escaping. - FM */ int CurrentCharSet = current_char_set; BOOL CurrentEightBitRaw = HTPassEightBitRaw; BOOLEAN CurrentUseDefaultRawMode = LYUseDefaultRawMode; HTCJKlang CurrentHTCJK = HTCJK; if (I.type && !strcasecomp(I.type, "hidden")) { me->HiddenValue = TRUE; current_char_set = LATIN1; /* Default ISO-Latin1 */ LYUseDefaultRawMode = TRUE; HTMLSetCharacterHandling(current_char_set); } CTRACE((tfp, "3.Ok, we're trying type=[%s]\n", NONNULL(I.type))); if (!I.type) me->UsePlainSpace = TRUE; else if (!strcasecomp(I.type, "text") || #ifdef USE_FILE_UPLOAD !strcasecomp(I.type, "file") || #endif !strcasecomp(I.type, "submit") || !strcasecomp(I.type, "image") || !strcasecomp(I.type, "reset")) { CTRACE((tfp, "normal field type: %s\n", NONNULL(I.type))); me->UsePlainSpace = TRUE; } StrAllocCopy(I_value, ((UseALTasVALUE == TRUE) ? 
value[HTML_INPUT_ALT] : value[HTML_INPUT_VALUE])); if (me->UsePlainSpace && !me->HiddenValue) { I.value_cs = current_char_set; } CTRACE((tfp, "4.Ok, we're trying type=[%s]\n", NONNULL(I.type))); TRANSLATE_AND_UNESCAPE_ENTITIES6(&I_value, ATTR_CS_IN, I.value_cs, (BOOL) (me->UsePlainSpace && !me->HiddenValue), me->UsePlainSpace, me->HiddenValue); I.value = I_value; if (me->UsePlainSpace == TRUE) { /* * Convert any newlines or tabs to spaces, and trim any * lead or trailing spaces. - FM */ LYReduceBlanks(I.value); } me->UsePlainSpace = FALSE; if (I.type && !strcasecomp(I.type, "hidden")) { me->HiddenValue = FALSE; current_char_set = CurrentCharSet; LYUseDefaultRawMode = CurrentUseDefaultRawMode; HTMLSetCharacterHandling(current_char_set); HTPassEightBitRaw = CurrentEightBitRaw; HTCJK = CurrentHTCJK; } } else if (HaveSRClink == TRUE) { /* * We put up an [IMAGE] link and '-' for a TYPE="image" and * didn't get a VALUE or ALT string, so fake a "Submit" value. * If we didn't put up a link, then HText_beginInput() will use * "[IMAGE]-Submit". - FM */ StrAllocCopy(I_value, "Submit"); I.value = I_value; } else if (ImageSrc) { /* [IMAGE]-Submit with verbose images and not clickable images. * Use ImageSrc if no other alt or value is supplied. --LE */ I.value = ImageSrc; } if (present && present[HTML_INPUT_READONLY]) I.readonly = YES; if (present && present[HTML_INPUT_CHECKED]) I.checked = YES; if (present && present[HTML_INPUT_SIZE] && non_empty(value[HTML_INPUT_SIZE])) I.size = atoi(value[HTML_INPUT_SIZE]); LimitValue(I.size, MAX_LINE); if (present && present[HTML_INPUT_MAXLENGTH] && non_empty(value[HTML_INPUT_MAXLENGTH])) I.maxlength = value[HTML_INPUT_MAXLENGTH]; if (present && present[HTML_INPUT_DISABLED]) I.disabled = YES; if (present && present[HTML_INPUT_ACCEPT_CHARSET]) { /* Not yet used. */ I.accept_cs = (value[HTML_INPUT_ACCEPT_CHARSET] ? value[HTML_INPUT_ACCEPT_CHARSET] : "UNKNOWN"); } if (present && present[HTML_INPUT_ALIGN] && /* Not yet used. 
*/ non_empty(value[HTML_INPUT_ALIGN])) I.align = value[HTML_INPUT_ALIGN]; if (present && present[HTML_INPUT_CLASS] && /* Not yet used. */ non_empty(value[HTML_INPUT_CLASS])) I.iclass = value[HTML_INPUT_CLASS]; if (present && present[HTML_INPUT_ERROR] && /* Not yet used. */ non_empty(value[HTML_INPUT_ERROR])) I.error = value[HTML_INPUT_ERROR]; if (present && present[HTML_INPUT_HEIGHT] && /* Not yet used. */ non_empty(value[HTML_INPUT_HEIGHT])) I.height = value[HTML_INPUT_HEIGHT]; if (present && present[HTML_INPUT_WIDTH] && /* Not yet used. */ non_empty(value[HTML_INPUT_WIDTH])) I.width = value[HTML_INPUT_WIDTH]; if (present && present[HTML_INPUT_ID] && non_empty(value[HTML_INPUT_ID])) { I.id = value[HTML_INPUT_ID]; CHECK_ID(HTML_INPUT_ID); } if (present && present[HTML_INPUT_LANG] && /* Not yet used. */ non_empty(value[HTML_INPUT_LANG])) I.lang = value[HTML_INPUT_LANG]; if (present && present[HTML_INPUT_MD] && /* Not yet used. */ non_empty(value[HTML_INPUT_MD])) I.md = value[HTML_INPUT_MD]; chars = HText_beginInput(me->text, me->inUnderline, &I); CTRACE((tfp, "I.%s have %d chars, or something\n", NONNULL(I.type), chars)); /* * Submit and reset buttons have values which don't change, so * HText_beginInput() sets I.value to the string which should be * displayed, and we'll enter that instead of underscore * placeholders into the HText structure to see it instead of * underscores when dumping or printing. We also won't worry about * a wrap in PRE blocks, because the line editor never is invoked * for submit or reset buttons. - LE & FM */ if (I.type && (!strcasecomp(I.type, "submit") || !strcasecomp(I.type, "reset") || !strcasecomp(I.type, "image"))) IsSubmitOrReset = TRUE; if (I.type && chars == 3 && !strcasecomp(I.type, "radio")) { /* * Put a (_) placeholder, and one space (collapsible) before * the label that is expected to follow. 
- FM */ HTML_put_string(me, "(_)"); HText_endInput(me->text); chars = 0; me->in_word = YES; if (me->sp[0].tag_number != HTML_PRE && me->sp->style->freeFormat) { HTML_put_character(me, ' '); me->in_word = NO; } } else if (I.type && chars == 3 && !strcasecomp(I.type, "checkbox")) { /* * Put a [_] placeholder, and one space (collapsible) before * the label that is expected to follow. - FM */ HTML_put_string(me, "[_]"); HText_endInput(me->text); chars = 0; me->in_word = YES; if (me->sp[0].tag_number != HTML_PRE && me->sp->style->freeFormat) { HTML_put_character(me, ' '); me->in_word = NO; } } else if ((me->sp[0].tag_number == HTML_PRE || !me->sp->style->freeFormat) && chars > 6 && IsSubmitOrReset == FALSE) { /* * This is not a submit or reset button, and we are in a PRE * block with a field intended to exceed 6 character widths. * The code inadequately handles INPUT fields in PRE tags if * wraps occur (at the right margin) for the underscore * placeholders. We'll put up a minimum of 6 underscores, * since we should have wrapped artificially, above, if the * INPUT begins within 6 columns of the right margin, and if * any more would exceed the wrap column, we'll ignore them. * Note that if we somehow get tripped up and a wrap still does * occur before all 6 of the underscores are output, the * wrapped ones won't be treated as part of the editing window, * nor be highlighted when not editing (Yuk!). - FM */ for (i = 0; i < 6; i++) { HTML_put_character(me, '_'); chars--; } } CTRACE((tfp, "I.%s, %d\n", NONNULL(I.type), IsSubmitOrReset)); if (IsSubmitOrReset == FALSE) { /* * This is not a submit or reset button, so output the rest of * the underscore placeholders, if any more are needed. 
- FM */ if (chars > 0) { for (; chars > 0; chars--) HTML_put_character(me, '_'); HText_endInput(me->text); } } else { if (HTCJK == JAPANESE) { kcode = HText_getKcode(me->text); HText_updateKcode(me->text, kanji_code); specified_kcode = HText_getSpecifiedKcode(me->text); HText_updateSpecifiedKcode(me->text, kanji_code); } if (me->sp[0].tag_number == HTML_PRE || !me->sp->style->freeFormat) { /* * We have a submit or reset button in a PRE block, so * output the entire value from the markup. If it extends * to the right margin, it will wrap there, and only the * portion before that wrap will be hightlighted on screen * display (Yuk!) but we may as well show the rest of the * full value on the next or more lines. - FM */ while (I.value[i]) HTML_put_character(me, I.value[i++]); } else { /* * The submit or reset button is not in a PRE block. Note * that if a wrap occurs before outputting the entire * value, the wrapped portion will not be highlighted or * clearly indicated as part of the link for submission or * reset (Yuk!). We'll replace any spaces in the submit or * reset button value with nbsp, to promote a wrap at the * space we ensured would be present before the start of * the string, as when we use all underscores instead of * the INPUT's actual value, but we could still get a wrap * at the right margin, instead, if the value is greater * than a line width for the current style. Also, if chars * somehow ended up longer than the length of the actual * value (shouldn't have), we'll continue padding with nbsp * up to the length of chars. - FM */ for (i = 0; I.value[i]; i++) HTML_put_character(me, (char) (I.value[i] == ' ' ? 
HT_NON_BREAK_SPACE : I.value[i])); while (i++ < chars) HTML_put_character(me, HT_NON_BREAK_SPACE); } if (HTCJK == JAPANESE) { HText_updateKcode(me->text, kcode); HText_updateSpecifiedKcode(me->text, specified_kcode); } } if (chars != 0) { HText_endInput(me->text); } FREE(ImageSrc); FREE(I_value); FREE(I_name); } break; case HTML_TEXTAREA: /* * Set to know we are in a textarea. */ me->inTEXTAREA = TRUE; /* * Get ready for the value. */ HTChunkClear(&me->textarea); if (present && present[HTML_TEXTAREA_NAME] && value[HTML_TEXTAREA_NAME]) { StrAllocCopy(me->textarea_name, value[HTML_TEXTAREA_NAME]); me->textarea_name_cs = ATTR_CS_IN; if (StrChr(value[HTML_TEXTAREA_NAME], '&') != NULL) { UNESCAPE_FIELDNAME_TO_STD(&me->textarea_name); } } else { StrAllocCopy(me->textarea_name, ""); } if (present && present[HTML_TEXTAREA_ACCEPT_CHARSET]) { if (value[HTML_TEXTAREA_ACCEPT_CHARSET]) { StrAllocCopy(me->textarea_accept_cs, value[HTML_TEXTAREA_ACCEPT_CHARSET]); TRANSLATE_AND_UNESCAPE_TO_STD(&me->textarea_accept_cs); } else { StrAllocCopy(me->textarea_accept_cs, "UNKNOWN"); } } else { FREE(me->textarea_accept_cs); } if (present && present[HTML_TEXTAREA_COLS] && value[HTML_TEXTAREA_COLS] && isdigit(UCH(*value[HTML_TEXTAREA_COLS]))) { me->textarea_cols = atoi(value[HTML_TEXTAREA_COLS]); } else { int width; width = LYcolLimit - me->new_style->leftIndent - me->new_style->rightIndent; if (dump_output_immediately) /* don't waste too much for this */ width = HTMIN(width, DFT_TEXTAREA_COLS); if (width > 1 && (width - 1) * 6 < MAX_LINE - 3 - me->new_style->leftIndent - me->new_style->rightIndent) me->textarea_cols = width; else me->textarea_cols = DFT_TEXTAREA_COLS; } LimitValue(me->textarea_cols, MAX_TEXTAREA_COLS); if (present && present[HTML_TEXTAREA_ROWS] && value[HTML_TEXTAREA_ROWS] && isdigit(UCH(*value[HTML_TEXTAREA_ROWS]))) { me->textarea_rows = atoi(value[HTML_TEXTAREA_ROWS]); } else { me->textarea_rows = DFT_TEXTAREA_ROWS; } LimitValue(me->textarea_rows, MAX_TEXTAREA_ROWS); /* 
* Lynx treats disabled and readonly textarea's the same - * unmodifiable in either case. */ me->textarea_readonly = NO; if (present && present[HTML_TEXTAREA_READONLY]) me->textarea_readonly = YES; me->textarea_disabled = NO; if (present && present[HTML_TEXTAREA_DISABLED]) me->textarea_disabled = YES; if (present && present[HTML_TEXTAREA_ID] && non_empty(value[HTML_TEXTAREA_ID])) { StrAllocCopy(id_string, value[HTML_TEXTAREA_ID]); TRANSLATE_AND_UNESCAPE_TO_STD(&id_string); if ((*id_string != '\0') && (ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ id_string, /* Tag */ NULL, /* Addresss */ (HTLinkType *) 0))) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); StrAllocCopy(me->textarea_id, id_string); } else { FREE(me->textarea_id); } FREE(id_string); } else { FREE(me->textarea_id); } break; case HTML_SELECT: /* * Check for an already open SELECT block. - FM */ if (me->inSELECT) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: SELECT start tag in SELECT element. Faking SELECT end tag. *****\n"); } if (me->sp->tag_number != HTML_SELECT) { SET_SKIP_STACK(HTML_SELECT); } HTML_end_element(me, HTML_SELECT, include); } /* * Start a new SELECT block. - FM */ LYHandleSELECT(me, present, (STRING2PTR) value, include, TRUE); break; case HTML_OPTION: { /* * An option is a special case of an input field. */ InputFieldData I; /* * Make sure we're in a select tag. */ if (!me->inSELECT) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: OPTION tag not within SELECT tag\n"); } /* * Too likely to cause a crash, so we'll ignore it. - FM */ break; } if (!me->first_option) { /* * Finish the data off. 
*/ HTChunkTerminate(&me->option); /* * Finish the previous option @@@@@ */ HText_setLastOptionValue(me->text, me->option.data, me->LastOptionValue, MIDDLE_ORDER, me->LastOptionChecked, me->UCLYhndl, ATTR_CS_IN); } /* * If it's not a multiple option list and select popups are * enabled, then don't use the checkbox/button method, and don't * put anything on the screen yet. */ if (me->first_option || HTCurSelectGroupType == F_CHECKBOX_TYPE || LYSelectPopups == FALSE) { if (HTCurSelectGroupType == F_CHECKBOX_TYPE || LYSelectPopups == FALSE) { /* * Start a newline before each option. */ LYEnsureSingleSpace(me); } else { /* * Add option list designation character. */ HText_appendCharacter(me->text, '['); me->in_word = YES; } /* * Inititialize. */ memset(&I, 0, sizeof(I)); I.name_cs = -1; I.value_cs = current_char_set; I.type = "OPTION"; if ((present && present[HTML_OPTION_SELECTED]) || (me->first_option && LYSelectPopups == FALSE && HTCurSelectGroupType == F_RADIO_TYPE)) I.checked = YES; if (present && present[HTML_OPTION_VALUE] && value[HTML_OPTION_VALUE]) { /* * Convert any HTML entities or decimal escaping. 
- FM */ StrAllocCopy(I_value, value[HTML_OPTION_VALUE]); me->HiddenValue = TRUE; TRANSLATE_AND_UNESCAPE_ENTITIES6(&I_value, ATTR_CS_IN, ATTR_CS_IN, NO, me->UsePlainSpace, me->HiddenValue); I.value_cs = ATTR_CS_IN; me->HiddenValue = FALSE; I.value = I_value; } if (me->select_disabled || (0 && present && present[HTML_OPTION_DISABLED])) { /* 2009/5/25 - suppress check for "disabled" attribute * for Debian #525934 -TD */ I.disabled = YES; } if (present && present[HTML_OPTION_ID] && non_empty(value[HTML_OPTION_ID])) { if ((ID_A = HTAnchor_findChildAndLink(me->node_anchor, /* Parent */ value[HTML_OPTION_ID], /* Tag */ NULL, /* Addresss */ 0)) != NULL) { /* Type */ HText_beginAnchor(me->text, me->inUnderline, ID_A); HText_endAnchor(me->text, 0); I.id = value[HTML_OPTION_ID]; } } HText_beginInput(me->text, me->inUnderline, &I); if (HTCurSelectGroupType == F_CHECKBOX_TYPE) { /* * Put a "[_]" placeholder, and one space (collapsible) * before the label that is expected to follow. - FM */ HText_appendCharacter(me->text, '['); HText_appendCharacter(me->text, '_'); HText_appendCharacter(me->text, ']'); HText_appendCharacter(me->text, ' '); HText_setLastChar(me->text, ' '); /* absorb white space */ me->in_word = NO; } else if (LYSelectPopups == FALSE) { /* * Put a "(_)" placeholder, and one space (collapsible) * before the label that is expected to follow. - FM */ HText_appendCharacter(me->text, '('); HText_appendCharacter(me->text, '_'); HText_appendCharacter(me->text, ')'); HText_appendCharacter(me->text, ' '); HText_setLastChar(me->text, ' '); /* absorb white space */ me->in_word = NO; } } /* * Get ready for the next value. 
*/ HTChunkClear(&me->option); if ((present && present[HTML_OPTION_SELECTED]) || (me->first_option && LYSelectPopups == FALSE && HTCurSelectGroupType == F_RADIO_TYPE)) me->LastOptionChecked = TRUE; else me->LastOptionChecked = FALSE; me->first_option = FALSE; if (present && present[HTML_OPTION_VALUE] && value[HTML_OPTION_VALUE]) { if (!I_value) { /* * Convert any HTML entities or decimal escaping. - FM */ StrAllocCopy(I_value, value[HTML_OPTION_VALUE]); me->HiddenValue = TRUE; TRANSLATE_AND_UNESCAPE_ENTITIES6(&I_value, ATTR_CS_IN, ATTR_CS_IN, NO, me->UsePlainSpace, me->HiddenValue); me->HiddenValue = FALSE; } StrAllocCopy(me->LastOptionValue, I_value); } else { StrAllocCopy(me->LastOptionValue, me->option.data); } /* * If this is a popup option, print its option for use in selecting * option by number. - LE */ if (HTCurSelectGroupType == F_RADIO_TYPE && LYSelectPopups && fields_are_numbered()) { char marker[8]; int opnum = HText_getOptionNum(me->text); if (opnum > 0 && opnum < 100000) { sprintf(marker, "(%d)", opnum); HTML_put_string(me, marker); for (i = (int) strlen(marker); i < 5; ++i) { HTML_put_character(me, '_'); } } } FREE(I_value); } break; case HTML_TABLE: /* * Not fully implemented. Just treat as a division with respect to any * ALIGN attribute, with a default of HT_LEFT, or leave as a PRE block * if we are presently in one. - FM * * Also notify simple table tracking code unless in a preformatted * section, or (currently) non-left alignment. * * If page author is using a TABLE within PRE, it's probably formatted * specifically to work well for Lynx without simple table tracking * code. Cancel tracking, it would only make things worse. 
- kw */ #ifdef EXP_NESTED_TABLES if (!nested_tables) #endif { HText_cancelStbl(me->text); } if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } if (me->Underline_Level > 0) { SET_SKIP_STACK(HTML_U); HTML_end_element(me, HTML_U, include); } me->inTABLE = TRUE; if (me->sp->style->id == ST_Preformatted) { UPDATE_STYLE; CHECK_ID(HTML_TABLE_ID); break; } if (me->Division_Level < (MAX_NESTING - 1)) { me->Division_Level++; } else { CTRACE((tfp, "HTML: ****** Maximum nesting of %d divisions/tables exceeded!\n", MAX_NESTING)); } if (present && present[HTML_TABLE_ALIGN] && non_empty(value[HTML_TABLE_ALIGN])) { if (!strcasecomp(value[HTML_TABLE_ALIGN], "center")) { if (no_table_center) { me->DivisionAlignments[me->Division_Level] = HT_LEFT; change_paragraph_style(me, styles[HTML_DLEFT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DLEFT]->alignment; } else { me->DivisionAlignments[me->Division_Level] = HT_CENTER; change_paragraph_style(me, styles[HTML_DCENTER]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DCENTER]->alignment; } stbl_align = HT_CENTER; } else if (!strcasecomp(value[HTML_TABLE_ALIGN], "right")) { me->DivisionAlignments[me->Division_Level] = HT_RIGHT; change_paragraph_style(me, styles[HTML_DRIGHT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DRIGHT]->alignment; stbl_align = HT_RIGHT; } else { me->DivisionAlignments[me->Division_Level] = HT_LEFT; change_paragraph_style(me, styles[HTML_DLEFT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DLEFT]->alignment; if (!strcasecomp(value[HTML_TABLE_ALIGN], "left") || !strcasecomp(value[HTML_TABLE_ALIGN], "justify")) stbl_align = HT_LEFT; } } else { me->DivisionAlignments[me->Division_Level] = HT_LEFT; change_paragraph_style(me, styles[HTML_DLEFT]); UPDATE_STYLE; me->current_default_alignment = styles[HTML_DLEFT]->alignment; /* stbl_align remains HT_ALIGN_NONE */ } CHECK_ID(HTML_TABLE_ID); HText_startStblTABLE(me->text, stbl_align); break; 
case HTML_TR: /* * Not fully implemented. Just start a new row, if needed, act on an * ALIGN attribute if present, and check for an ID link. - FM * Also notify simple table tracking code. - kw */ if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } if (me->Underline_Level > 0) { SET_SKIP_STACK(HTML_U); HTML_end_element(me, HTML_U, include); } UPDATE_STYLE; if (!HText_LastLineEmpty(me->text, FALSE)) { HText_setLastChar(me->text, ' '); /* absorb white space */ HText_appendCharacter(me->text, '\r'); } me->in_word = NO; if (me->sp->style->id == ST_Preformatted) { CHECK_ID(HTML_TR_ID); me->inP = FALSE; break; } if (LYoverride_default_alignment(me)) { me->sp->style->alignment = styles[me->sp[0].tag_number]->alignment; } else if (me->List_Nesting_Level >= 0 || ((me->Division_Level < 0) && (me->sp->style->id == ST_Normal || me->sp->style->id == ST_Preformatted))) { me->sp->style->alignment = HT_LEFT; } else { me->sp->style->alignment = (short) me->current_default_alignment; } if (present && present[HTML_TR_ALIGN] && value[HTML_TR_ALIGN]) { if (!strcasecomp(value[HTML_TR_ALIGN], "center") && !(me->List_Nesting_Level >= 0 && !me->inP)) { if (no_table_center) me->sp->style->alignment = HT_LEFT; else me->sp->style->alignment = HT_CENTER; stbl_align = HT_CENTER; } else if (!strcasecomp(value[HTML_TR_ALIGN], "right") && !(me->List_Nesting_Level >= 0 && !me->inP)) { me->sp->style->alignment = HT_RIGHT; stbl_align = HT_RIGHT; } else if (!strcasecomp(value[HTML_TR_ALIGN], "left") || !strcasecomp(value[HTML_TR_ALIGN], "justify")) { me->sp->style->alignment = HT_LEFT; stbl_align = HT_LEFT; } } CHECK_ID(HTML_TR_ID); me->inP = FALSE; HText_startStblTR(me->text, stbl_align); break; case HTML_THEAD: case HTML_TFOOT: case HTML_TBODY: HText_endStblTR(me->text); /* * Not fully implemented. Just check for an ID link. 
- FM */ if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } if (me->Underline_Level > 0) { SET_SKIP_STACK(HTML_U); HTML_end_element(me, HTML_U, include); } UPDATE_STYLE; if (me->inTABLE) { if (present && present[HTML_TR_ALIGN] && value[HTML_TR_ALIGN]) { if (!strcasecomp(value[HTML_TR_ALIGN], "center")) { stbl_align = HT_CENTER; } else if (!strcasecomp(value[HTML_TR_ALIGN], "right")) { stbl_align = HT_RIGHT; } else if (!strcasecomp(value[HTML_TR_ALIGN], "left") || !strcasecomp(value[HTML_TR_ALIGN], "justify")) { stbl_align = HT_LEFT; } } HText_startStblRowGroup(me->text, stbl_align); } CHECK_ID(HTML_TR_ID); break; case HTML_COL: case HTML_COLGROUP: /* * Not fully implemented. Just check for an ID link. - FM */ if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } if (me->Underline_Level > 0) { SET_SKIP_STACK(HTML_U); HTML_end_element(me, HTML_U, include); } UPDATE_STYLE; if (me->inTABLE) { int span = 1; if (present && present[HTML_COL_SPAN] && value[HTML_COL_SPAN] && isdigit(UCH(*value[HTML_COL_SPAN]))) span = atoi(value[HTML_COL_SPAN]); if (present && present[HTML_COL_ALIGN] && value[HTML_COL_ALIGN]) { if (!strcasecomp(value[HTML_COL_ALIGN], "center")) { stbl_align = HT_CENTER; } else if (!strcasecomp(value[HTML_COL_ALIGN], "right")) { stbl_align = HT_RIGHT; } else if (!strcasecomp(value[HTML_COL_ALIGN], "left") || !strcasecomp(value[HTML_COL_ALIGN], "justify")) { stbl_align = HT_LEFT; } } HText_startStblCOL(me->text, span, stbl_align, (BOOL) (ElementNumber == HTML_COLGROUP)); } CHECK_ID(HTML_COL_ID); break; case HTML_TH: case HTML_TD: if (me->inA) { SET_SKIP_STACK(HTML_A); HTML_end_element(me, HTML_A, include); } if (me->Underline_Level > 0) { SET_SKIP_STACK(HTML_U); HTML_end_element(me, HTML_U, include); } UPDATE_STYLE; CHECK_ID(HTML_TD_ID); /* * Not fully implemented. Just add a collapsible space and break - FM * Also notify simple table tracking code. 
- kw */ HTML_put_character(me, ' '); { int colspan = 1, rowspan = 1; if (present && present[HTML_TD_COLSPAN] && value[HTML_TD_COLSPAN] && isdigit(UCH(*value[HTML_TD_COLSPAN]))) colspan = atoi(value[HTML_TD_COLSPAN]); if (present && present[HTML_TD_ROWSPAN] && value[HTML_TD_ROWSPAN] && isdigit(UCH(*value[HTML_TD_ROWSPAN]))) rowspan = atoi(value[HTML_TD_ROWSPAN]); if (present && present[HTML_TD_ALIGN] && value[HTML_TD_ALIGN]) { if (!strcasecomp(value[HTML_TD_ALIGN], "center")) { stbl_align = HT_CENTER; } else if (!strcasecomp(value[HTML_TD_ALIGN], "right")) { stbl_align = HT_RIGHT; } else if (!strcasecomp(value[HTML_TD_ALIGN], "left") || !strcasecomp(value[HTML_TD_ALIGN], "justify")) { stbl_align = HT_LEFT; } } HText_startStblTD(me->text, colspan, rowspan, stbl_align, (BOOL) (ElementNumber == HTML_TH)); } me->in_word = NO; break; case HTML_MATH: /* * We're getting it as Literal text, which, until we can process it, * we'll display as is, within brackets to alert the user. - FM */ HTChunkClear(&me->math); CHECK_ID(HTML_GEN_ID); break; default: break; } /* end switch */ if (ElementNumber >= HTML_ELEMENTS || HTML_dtd.tags[ElementNumber].contents != SGML_EMPTY) { if (me->skip_stack > 0) { CTRACE((tfp, "HTML:begin_element: internal call (level %d), leaving on stack - `%s'\n", me->skip_stack, NONNULL(GetHTStyleName(me->sp->style)))); me->skip_stack--; return status; } if (me->sp == me->stack) { if (me->stack_overrun == FALSE) { HTAlert(HTML_STACK_OVERRUN); CTRACE((tfp, "HTML: ****** Maximum nesting of %d tags exceeded!\n", MAX_NESTING)); me->stack_overrun = TRUE; } return HT_ERROR; } CTRACE((tfp, "HTML:begin_element[%d]: adding style to stack - %s (%s)\n", (int) STACKLEVEL(me), NONNULL(GetHTStyleName(me->new_style)), HTML_dtd.tags[ElementNumber].name)); (me->sp)--; me->sp[0].style = me->new_style; /* Stack new style */ me->sp[0].tag_number = ElementNumber; #ifdef USE_JUSTIFY_ELTS if (wait_for_this_stacked_elt < 0 && HTML_dtd.tags[ElementNumber].can_justify == FALSE) 
wait_for_this_stacked_elt = (int) (me->stack - me->sp) + MAX_NESTING; #endif } #ifdef USE_JUSTIFY_ELTS if (in_DT && ElementNumber == HTML_DD) in_DT = FALSE; else if (ElementNumber == HTML_DT) in_DT = TRUE; #endif #if defined(USE_COLOR_STYLE) /* end really empty tags straight away */ if (ReallyEmptyTagNum(element_number)) { CTRACE2(TRACE_STYLE, (tfp, "STYLE.begin_element:ending \"EMPTY\" element style\n")); HText_characterStyle(me->text, HCODE_TO_STACK_OFF(hcode), STACK_OFF); # if !OMIT_SCN_KEEPING FastTrimColorClass(HTML_dtd.tags[element_number].name, HTML_dtd.tags[element_number].name_len, Style_className, &Style_className_end, &hcode); # endif } #endif /* USE_COLOR_STYLE */ return status; } /* End Element * ----------- * * When we end an element, the style must be returned to that * in effect before that element. Note that anchors (etc?) * don't have an associated style, so that we must scan down the * stack for an element with a defined style. (In fact, the styles * should be linked to the whole stack not just the top one.) 
* TBL 921119 */ static int HTML_end_element(HTStructured * me, int element_number, char **include) { static char empty[1]; int i = 0; int status = HT_OK; char *temp = NULL, *cp = NULL; BOOL BreakFlag = FALSE; BOOL intern_flag = FALSE; #ifdef USE_COLOR_STYLE BOOL skip_stack_requested = FALSE; #endif EMIT_IFDEF_USE_JUSTIFY_ELTS(BOOL reached_awaited_stacked_elt = FALSE); #ifdef USE_PRETTYSRC if (psrc_view && !sgml_in_psrc_was_initialized) { if (!psrc_nested_call) { HTTag *tag = &HTML_dtd.tags[element_number]; char buf[200]; int tag_charset = 0; psrc_nested_call = TRUE; PSRCSTART(abracket); PUTS("</"); PSRCSTOP(abracket); PSRCSTART(tag); if (tagname_transform != 0) PUTS(tag->name); else { LYStrNCpy(buf, tag->name, sizeof(buf) - 1); LYLowerCase(buf); PUTS(buf); } PSRCSTOP(tag); PSRCSTART(abracket); PUTC('>'); PSRCSTOP(abracket); psrc_nested_call = FALSE; return HT_OK; } /*fall through */ } #endif if ((me->sp >= (me->stack + MAX_NESTING - 1) || element_number != me->sp[0].tag_number) && HTML_dtd.tags[element_number].contents != SGML_EMPTY) { CTRACE((tfp, "HTML: end of element %s when expecting end of %s\n", HTML_dtd.tags[element_number].name, (me->sp == me->stack + MAX_NESTING - 1) ? "none" : (me->sp->tag_number < 0) ? "*invalid tag*" : (me->sp->tag_number >= HTML_ELEMENTS) ? "special tag" : HTML_dtd.tags[me->sp->tag_number].name)); } /* * If we're seeking MAPs, skip everything that's not a MAP or AREA tag. - * FM */ if (LYMapsOnly) { if (!(element_number == HTML_MAP || element_number == HTML_AREA || element_number == HTML_OBJECT)) { return HT_OK; } } /* * Pop state off stack if we didn't declare the element SGML_EMPTY in * HTMLDTD.c. 
- FM & KW */ if (HTML_dtd.tags[element_number].contents != SGML_EMPTY) { #ifdef USE_COLOR_STYLE skip_stack_requested = (BOOL) (me->skip_stack > 0); #endif if ((element_number != me->sp[0].tag_number) && me->skip_stack <= 0 && HTML_dtd.tags[HTML_LH].contents != SGML_EMPTY && (me->sp[0].tag_number == HTML_UL || me->sp[0].tag_number == HTML_OL || me->sp[0].tag_number == HTML_MENU || me->sp[0].tag_number == HTML_DIR || me->sp[0].tag_number == HTML_LI) && (element_number == HTML_H1 || element_number == HTML_H2 || element_number == HTML_H3 || element_number == HTML_H4 || element_number == HTML_H5 || element_number == HTML_H6)) { /* * Set the break flag if we're popping a dummy HTML_LH substituted * for an HTML_H# encountered in a list. */ BreakFlag = TRUE; } if (me->skip_stack == 0 && element_number == HTML_OBJECT && me->sp[0].tag_number == HTML_OBJECT_M && (me->sp < (me->stack + MAX_NESTING - 1))) me->sp[0].tag_number = HTML_OBJECT; if (me->skip_stack > 0) { CTRACE2(TRACE_STYLE, (tfp, "HTML:end_element: Internal call (level %d), leaving on stack - %s\n", me->skip_stack, NONNULL(GetHTStyleName(me->sp->style)))); me->skip_stack--; } else if (element_number == HTML_OBJECT && me->sp[0].tag_number != HTML_OBJECT && me->sp[0].tag_number != HTML_OBJECT_M && me->objects_mixed_open > 0 && !(me->objects_figged_open > 0 && me->sp[0].tag_number == HTML_FIG)) { /* * Ignore non-corresponding OBJECT tags that we didn't push because * the SGML parser was supposed to go on parsing the contents * non-literally. - kw */ CTRACE2(TRACE_STYLE, (tfp, "HTML:end_element[%d]: %s (level %d), %s - %s\n", (int) STACKLEVEL(me), "Special OBJECT handling", me->objects_mixed_open, "leaving on stack", NONNULL(GetHTStyleName(me->sp->style)))); me->objects_mixed_open--; } else if (me->stack_overrun == TRUE && element_number != me->sp[0].tag_number) { /* * Ignore non-corresponding tags if we had a stack overrun. 
This * is not a completely fail-safe strategy for protection against * any seriously adverse consequences of a stack overrun, and the * rendering of the document will not be as intended, but we expect * overruns to be rare, and this should offer reasonable protection * against crashes if an overrun does occur. - FM */ return HT_OK; /* let's pretend... */ } else if (element_number == HTML_SELECT && me->sp[0].tag_number != HTML_SELECT) { /* * Ignore non-corresponding SELECT tags, since we probably popped * it and closed the SELECT block to deal with markup which amounts * to a nested SELECT, or an out of order FORM end tag. - FM */ return HT_OK; } else if ((element_number != me->sp[0].tag_number) && HTML_dtd.tags[HTML_LH].contents == SGML_EMPTY && (me->sp[0].tag_number == HTML_UL || me->sp[0].tag_number == HTML_OL || me->sp[0].tag_number == HTML_MENU || me->sp[0].tag_number == HTML_DIR || me->sp[0].tag_number == HTML_LI) && (element_number == HTML_H1 || element_number == HTML_H2 || element_number == HTML_H3 || element_number == HTML_H4 || element_number == HTML_H5 || element_number == HTML_H6)) { /* * It's an H# for which we substituted an HTML_LH, which we've * declared as SGML_EMPTY, so just return. - FM */ return HT_OK; } else if (me->sp < (me->stack + MAX_NESTING - 1)) { #ifdef USE_JUSTIFY_ELTS if (wait_for_this_stacked_elt == me->stack - me->sp + MAX_NESTING) reached_awaited_stacked_elt = TRUE; #endif if (element_number == HTML_OBJECT) { if (me->sp[0].tag_number == HTML_FIG && me->objects_figged_open > 0) { /* * It's an OBJECT for which we substituted a FIG, so pop * the FIG and pretend that's what we are being called for. 
* - kw */ CTRACE2(TRACE_STYLE, (tfp, "HTML:end_element[%d]: %s (level %d), %s - %s\n", (int) STACKLEVEL(me), "Special OBJECT->FIG handling", me->objects_figged_open, "treating as end FIG", NONNULL(GetHTStyleName(me->sp->style)))); me->objects_figged_open--; element_number = HTML_FIG; } } (me->sp)++; CTRACE2(TRACE_STYLE, (tfp, "HTML:end_element[%d]: Popped style off stack - %s\n", (int) STACKLEVEL(me), NONNULL(GetHTStyleName(me->sp->style)))); } else { CTRACE2(TRACE_STYLE, (tfp, "Stack underflow error! Tried to pop off more styles than exist in stack\n")); } } if (BreakFlag == TRUE) { #ifdef USE_JUSTIFY_ELTS if (reached_awaited_stacked_elt) wait_for_this_stacked_elt = -1; #endif return HT_OK; /* let's pretend... */ } /* * Check for unclosed TEXTAREA. - FM */ if (me->inTEXTAREA && element_number != HTML_TEXTAREA) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Missing TEXTAREA end tag\n"); } } if (!me->text && !LYMapsOnly) { UPDATE_STYLE; } /* * Handle the end tag. - FM */ switch (element_number) { case HTML_HTML: if (me->inA || me->inSELECT || me->inTEXTAREA) { if (LYBadHTML(me)) { char *msg = NULL; HTSprintf0(&msg, "Bad HTML: %s%s%s%s%s not closed before HTML end tag *****\n", me->inSELECT ? "SELECT" : "", (me->inSELECT && me->inTEXTAREA) ? ", " : "", me->inTEXTAREA ? "TEXTAREA" : "", (((me->inSELECT || me->inTEXTAREA) && me->inA) ? ", " : ""), me->inA ? "A" : ""); LYShowBadHTML(msg); FREE(msg); } } break; case HTML_HEAD: if (me->inBASE && (LYIsUIPage3(me->node_anchor->address, UIP_LIST_PAGE, 0) || LYIsUIPage3(me->node_anchor->address, UIP_ADDRLIST_PAGE, 0))) { /* If we are parsing the List Page, and have a BASE after we are * done with the HEAD element, propagate it back to the node_anchor * object. The base should have been inserted by showlist() to * record what document the List Page is about, and other functions * may later look for it in the anchor. 
- kw */ StrAllocCopy(me->node_anchor->content_base, me->base_href); } if (HText_hasToolbar(me->text)) HText_appendParagraph(me->text); break; case HTML_TITLE: HTChunkTerminate(&me->title); HTAnchor_setTitle(me->node_anchor, me->title.data); HTChunkClear(&me->title); /* * Check if it's a bookmark file, and if so, and multiple bookmark * support is on, or it's off but this isn't the default bookmark file * (e.g., because it was on before, and this is another bookmark file * that has been retrieved as a previous document), insert the current * description string and filepath for it. We pass the strings back to * the SGML parser so that any 8 bit or multibyte/CJK characters will * be handled by the parser's state and charset routines. - FM */ if (non_empty(me->node_anchor->bookmark)) { if ((LYMultiBookmarks != MBM_OFF) || (non_empty(bookmark_page) && strcmp(me->node_anchor->bookmark, bookmark_page))) { if (!include) include = &me->xinclude; for (i = 0; i <= MBM_V_MAXFILES; i++) { if (MBM_A_subbookmark[i] && !strcmp(MBM_A_subbookmark[i], me->node_anchor->bookmark)) { StrAllocCat(*include, "<H2><EM>"); StrAllocCat(*include, gettext("Description:")); StrAllocCat(*include, "</EM> "); StrAllocCopy(temp, ((MBM_A_subdescript[i] && *MBM_A_subdescript[i]) ? MBM_A_subdescript[i] : gettext("(none)"))); LYEntify(&temp, TRUE); StrAllocCat(*include, temp); StrAllocCat(*include, "<BR><EM>&nbsp;&nbsp;&nbsp;"); StrAllocCat(*include, gettext("Filepath:")); StrAllocCat(*include, "</EM> "); StrAllocCopy(temp, ((MBM_A_subbookmark[i] && *MBM_A_subbookmark[i]) ? MBM_A_subbookmark[i] : gettext("(unknown)"))); LYEntify(&temp, TRUE); StrAllocCat(*include, temp); FREE(temp); StrAllocCat(*include, "</H2>"); break; } } } } break; case HTML_STYLE: /* * We're getting it as Literal text, which, for now, we'll just ignore. 
* - FM */ HTChunkTerminate(&me->style_block); CTRACE2(TRACE_STYLE, (tfp, "HTML: STYLE content =\n%s\n", me->style_block.data)); HTChunkClear(&me->style_block); break; case HTML_SCRIPT: /* * We're getting it as Literal text, which, for now, we'll just ignore. * - FM */ HTChunkTerminate(&me->script); CTRACE((tfp, "HTML: SCRIPT content =\n%s\n", me->script.data)); HTChunkClear(&me->script); break; case HTML_BODY: if (me->inA || me->inSELECT || me->inTEXTAREA) { if (LYBadHTML(me)) { char *msg = NULL; HTSprintf0(&msg, "Bad HTML: %s%s%s%s%s not closed before BODY end tag *****\n", me->inSELECT ? "SELECT" : "", (me->inSELECT && me->inTEXTAREA) ? ", " : "", me->inTEXTAREA ? "TEXTAREA" : "", (((me->inSELECT || me->inTEXTAREA) && me->inA) ? ", " : ""), me->inA ? "A" : ""); LYShowBadHTML(msg); FREE(msg); } } break; case HTML_FRAMESET: change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_NOFRAMES: case HTML_IFRAME: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_BANNER: case HTML_MARQUEE: case HTML_BLOCKQUOTE: case HTML_BQ: case HTML_ADDRESS: /* * Set flag to know that style has ended. Fall through. 
i_prior_style = -1; */ change_paragraph_style(me, me->sp->style); UPDATE_STYLE; if (me->sp->tag_number == element_number) LYEnsureDoubleSpace(me); if (me->List_Nesting_Level >= 0) HText_NegateLineOne(me->text); break; case HTML_CENTER: case HTML_DIV: if (me->Division_Level >= 0) me->Division_Level--; if (me->Division_Level >= 0) { if (me->sp->style->alignment != me->DivisionAlignments[me->Division_Level]) { if (me->inP) LYEnsureSingleSpace(me); me->sp->style->alignment = me->DivisionAlignments[me->Division_Level]; } } change_paragraph_style(me, me->sp->style); if (me->style_change) { actually_set_style(me); if (me->List_Nesting_Level >= 0) HText_NegateLineOne(me->text); } else if (me->inP) LYEnsureSingleSpace(me); me->current_default_alignment = me->sp->style->alignment; break; case HTML_H1: /* header styles */ case HTML_H2: case HTML_H3: case HTML_H4: case HTML_H5: case HTML_H6: if (me->Division_Level >= 0) { me->sp->style->alignment = me->DivisionAlignments[me->Division_Level]; } else if (me->sp->style->id == ST_HeadingCenter || me->sp->style->id == ST_Heading1) { me->sp->style->alignment = HT_CENTER; } else if (me->sp->style->id == ST_HeadingRight) { me->sp->style->alignment = HT_RIGHT; } else { me->sp->style->alignment = HT_LEFT; } change_paragraph_style(me, me->sp->style); UPDATE_STYLE; if (styles[element_number]->font & HT_BOLD) { if (me->inBoldA == FALSE && me->inBoldH == TRUE) { HText_appendCharacter(me->text, LY_BOLD_END_CHAR); } me->inBoldH = FALSE; } if (me->List_Nesting_Level >= 0) HText_NegateLineOne(me->text); if (me->Underline_Level > 0 && me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); me->inUnderline = TRUE; } break; case HTML_P: LYHandlePlike(me, (const BOOL *) 0, (STRING2PTR) 0, include, 0, FALSE); break; case HTML_FONT: me->inFONT = FALSE; break; case HTML_B: /* Physical character highlighting */ case HTML_BLINK: case HTML_I: case HTML_U: case HTML_CITE: /* Logical character highlighting */ case HTML_EM: 
case HTML_STRONG: /* * Ignore any emphasis end tags if the Underline_Level is not set. - * FM */ if (me->Underline_Level <= 0) break; /* * Adjust the Underline level counter, and turn off underlining if * appropriate. - FM */ me->Underline_Level--; if (me->inUnderline && me->Underline_Level < 1) { HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); me->inUnderline = FALSE; CTRACE((tfp, "Ending underline\n")); } else { CTRACE((tfp, "Underline Level is %d\n", me->Underline_Level)); } break; case HTML_ABBR: /* Miscellaneous character containers */ case HTML_ACRONYM: case HTML_AU: case HTML_AUTHOR: case HTML_BIG: case HTML_CODE: case HTML_DFN: case HTML_KBD: case HTML_SAMP: case HTML_SMALL: case HTML_SUP: case HTML_TT: case HTML_VAR: break; case HTML_SUB: HText_appendCharacter(me->text, ']'); break; case HTML_DEL: case HTML_S: case HTML_STRIKE: HTML_put_character(me, ' '); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, ":DEL]"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; break; case HTML_INS: HTML_put_character(me, ' '); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, ":INS]"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); me->in_word = NO; break; case HTML_Q: if (me->Quote_Level > 0) me->Quote_Level--; /* * Should check LANG and/or DIR attributes, and the * me->node_anchor->charset and/or yet to be added structure elements, * to determine whether we should use chevrons, but for now we'll * always use double- or single-quotes. - FM */ if (!(me->Quote_Level & 1)) HTML_put_character(me, '"'); else HTML_put_character(me, '\''); break; case HTML_PRE: /* Formatted text */ /* * Set to know that we are no longer in a PRE block. 
*/ HText_appendCharacter(me->text, '\n'); me->inPRE = FALSE; /* FALLTHRU */ case HTML_LISTING: /* Literal text */ /* FALLTHRU */ case HTML_XMP: /* FALLTHRU */ case HTML_PLAINTEXT: if (me->comment_start) HText_appendText(me->text, me->comment_start); change_paragraph_style(me, me->sp->style); /* Often won't really change */ if (me->List_Nesting_Level >= 0) { UPDATE_STYLE; HText_NegateLineOne(me->text); } break; case HTML_NOTE: case HTML_FN: change_paragraph_style(me, me->sp->style); /* Often won't really change */ UPDATE_STYLE; if (me->sp->tag_number == element_number) LYEnsureDoubleSpace(me); if (me->List_Nesting_Level >= 0) HText_NegateLineOne(me->text); me->inLABEL = FALSE; break; case HTML_OL: me->OL_Counter[me->List_Nesting_Level < 11 ? me->List_Nesting_Level : 11] = OL_VOID; /* FALLTHRU */ case HTML_DL: /* FALLTHRU */ case HTML_UL: /* FALLTHRU */ case HTML_MENU: /* FALLTHRU */ case HTML_DIR: me->List_Nesting_Level--; CTRACE((tfp, "HTML_end_element: Reducing List Nesting Level to %d\n", me->List_Nesting_Level)); #ifdef USE_JUSTIFY_ELTS if (element_number == HTML_DL) in_DT = FALSE; /*close the term that was without definition. */ #endif change_paragraph_style(me, me->sp->style); /* Often won't really change */ UPDATE_STYLE; if (me->List_Nesting_Level >= 0) LYEnsureSingleSpace(me); break; case HTML_SPAN: /* * Should undo anything we did based on LANG and/or DIR attributes, and * the me->node_anchor->charset and/or yet to be added structure * elements. - FM */ break; case HTML_BDO: /* * Should undo anything we did based on DIR (and/or LANG) attributes, * and the me->node_anchor->charset and/or yet to be added structure * elements. - FM */ break; case HTML_A: /* * Ignore any spurious A end tags. - FM */ if (me->inA == FALSE) break; /* * Set to know that we are no longer in an anchor. 
*/ me->inA = FALSE; #ifdef MARK_HIDDEN_LINKS if (non_empty(hidden_link_marker) && HText_isAnchorBlank(me->text, me->CurrentANum)) { HText_appendText(me->text, hidden_link_marker); } #endif UPDATE_STYLE; if (me->inBoldA == TRUE && me->inBoldH == FALSE) HText_appendCharacter(me->text, LY_BOLD_END_CHAR); HText_endAnchor(me->text, me->CurrentANum); me->CurrentANum = 0; me->inBoldA = FALSE; if (me->Underline_Level > 0 && me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); me->inUnderline = TRUE; } break; case HTML_MAP: FREE(me->map_address); break; case HTML_BODYTEXT: /* * We may need to look at this someday to deal with OBJECTs optimally, * but just ignore it for now. - FM */ change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_TEXTFLOW: /* * We may need to look at this someday to deal with APPLETs optimally, * but just ignore it for now. - FM */ change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_FIG: LYHandleFIG(me, NULL, NULL, 0, 0, NULL, NULL, NO, FALSE, &intern_flag); break; case HTML_OBJECT: /* * Finish the data off. */ { int s = 0, e = 0; char *start = NULL, *first_end = NULL, *last_end = NULL; char *first_map = NULL, *last_map = NULL; BOOL have_param = FALSE; char *data = NULL; HTChunkTerminate(&me->object); data = me->object.data; while ((cp = StrChr(data, '<')) != NULL) { /* * Look for nested OBJECTs. This procedure could get tripped * up if invalid comments are present in the content, or if an * OBJECT end tag is present in a quoted attribute. 
- FM */ if (!StrNCmp(cp, "<!--", 4)) { data = LYFindEndOfComment(cp); cp = data; } else if (s == 0 && !strncasecomp(cp, "<PARAM", 6) && !IsNmChar(cp[6])) { have_param = TRUE; } else if (!strncasecomp(cp, "<OBJECT", 7) && !IsNmChar(cp[7])) { if (s == 0) start = cp; s++; } else if (!strncasecomp(cp, "</OBJECT", 8) && !IsNmChar(cp[8])) { if (e == 0) first_end = cp; last_end = cp; e++; } else if (!strncasecomp(cp, "<MAP", 4) && !IsNmChar(cp[4])) { if (!first_map) first_map = cp; last_map = cp; } else if (!strncasecomp(cp, "</MAP", 5) && !IsNmChar(cp[5])) { last_map = cp; } data = ++cp; } if (s < e) { /* * We had more end tags than start tags, so we have bad HTML or * otherwise misparsed. - FM */ if (LYBadHTML(me)) { char *msg = NULL; HTSprintf0(&msg, "Bad HTML: Unmatched OBJECT start and end tags. Discarding content:\n%s\n", me->object.data); LYShowBadHTML(msg); FREE(msg); } goto End_Object; } if (s > e) { if (!me->object_declare && !me->object_name && !(me->object_shapes && !LYMapsOnly) && !(me->object_usemap != NULL && !LYMapsOnly) && !(clickable_images && !LYMapsOnly && me->object_data != NULL && !have_param && me->object_classid == NULL && me->object_codebase == NULL && me->object_codetype == NULL)) { /* * We have nested OBJECT tags, and not yet all of the end * tags, but have a case where the content needs to be * parsed again (not dropped) and where we don't want to * output anything special at the point when we * *do* have accumulated all the end tags. So recycle * the incomplete contents now, and signal the SGML parser * that it should not regard the current OBJECT ended but * should treat its contents as mixed. Normally these * cases would have already handled in the real * start_element call, so this block may not be necessary. * - kw */ CTRACE((tfp, "%s:\n%s\n", "HTML: Nested OBJECT tags. 
Recycling incomplete contents", me->object.data)); status = HT_PARSER_OTHER_CONTENT; me->object.size--; HTChunkPuts(&me->object, "</OBJECT>"); if (!include) /* error, should not happen */ include = &me->xinclude; StrnAllocCat(*include, me->object.data, (size_t) me->object.size); clear_objectdata(me); /* an internal fake call to keep our stack happy: */ HTML_start_element(me, HTML_OBJECT, NULL, NULL, me->tag_charset, include); break; } /* * We have nested OBJECT tags, and not yet all of the end tags, * and we want the end tags. So restore an end tag to the * content, and signal to the SGML parser that it should resume * the accumulation of OBJECT content (after calling back to * start_element) in a way that is equivalent to passing it a * dummy start tag. - FM, kw */ CTRACE((tfp, "HTML: Nested OBJECT tags. Recycling.\n")); status = HT_PARSER_REOPEN_ELT; me->object.size--; HTChunkPuts(&me->object, "</OBJECT>"); if (!LYMapsOnly) change_paragraph_style(me, me->sp->style); break; } /* * OBJECT start and end tags are fully matched, assuming we weren't * tripped up by comments or quoted attributes. - FM */ CTRACE((tfp, "HTML:OBJECT content:\n%s\n", me->object.data)); /* * OBJECTs with DECLARE should be saved but not instantiated, and * if nested, can have only other DECLAREd OBJECTs. Until we have * code to handle these, we'll just create an anchor for the ID, if * present, and discard the content (sigh 8-). - FM */ if (me->object_declare == TRUE) { if (non_empty(me->object_id) && !LYMapsOnly) LYHandleID(me, me->object_id); CTRACE((tfp, "HTML: DECLAREd OBJECT. Ignoring!\n")); goto End_Object; } /* * OBJECTs with NAME are for FORM submissions. We'll just create * an anchor for the ID, if present, and discard the content until * we have code to handle these. (sigh 8-). - FM */ if (me->object_name != NULL && !LYMapsOnly) { if (non_empty(me->object_id)) LYHandleID(me, me->object_id); CTRACE((tfp, "HTML: NAMEd OBJECT. 
Ignoring!\n")); goto End_Object; } /* * Deal with any nested OBJECTs by descending to the inner-most * OBJECT. - FM */ if (s > 0) { if (start != NULL && first_end != NULL && first_end > start) { /* * Minumum requirements for the ad hoc parsing to have * succeeded are met. We'll hope that it did succeed. - * FM */ if (LYMapsOnly) { /* * Well we don't need to do this any more, nested * objects should either not get here any more at all * or can be handled fine by other code below. Leave * in place for now as a special case for LYMapsOnly. * - kw */ if (LYMapsOnly && (!last_map || last_map < first_end)) *first_end = '\0'; else e = 0; data = NULL; if (LYMapsOnly && (!first_map || first_map > start)) StrAllocCopy(data, start); else StrAllocCopy(data, me->object.data); if (e > 0) { for (i = e; i > 0; i--) { StrAllocCat(data, "</OBJECT>"); } } if (!include) /* error, should not happen */ include = &me->xinclude; StrAllocCat(*include, data); CTRACE((tfp, "HTML: Recycling nested OBJECT%s.\n", (s > 1) ? "s" : "")); FREE(data); goto End_Object; } } else { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Unmatched OBJECT start and end tags. Discarding content.\n"); } goto End_Object; } } /* * If its content has SHAPES, convert it to FIG. - FM * * This is now handled in our start_element without using include * if the SGML parser cooperates, so this block may be unnecessary. * - kw */ if (me->object_shapes == TRUE && !LYMapsOnly) { CTRACE((tfp, "HTML: OBJECT has SHAPES. 
Converting to FIG.\n")); if (!include) /* error, should not happen */ include = &me->xinclude; StrAllocCat(*include, "<FIG ISOBJECT IMAGEMAP"); if (me->object_ismap == TRUE) StrAllocCat(*include, " IMAGEMAP"); if (me->object_id != NULL) { StrAllocCat(*include, " ID=\""); StrAllocCat(*include, me->object_id); StrAllocCat(*include, "\""); } if (me->object_data != NULL && me->object_classid == NULL) { StrAllocCat(*include, " SRC=\""); StrAllocCat(*include, me->object_data); StrAllocCat(*include, "\""); } StrAllocCat(*include, ">"); me->object.size--; HTChunkPuts(&me->object, "</FIG>"); HTChunkTerminate(&me->object); StrAllocCat(*include, me->object.data); goto End_Object; } /* * If it has a USEMAP attribute and didn't have SHAPES, convert it * to IMG. - FM */ if (me->object_usemap != NULL && !LYMapsOnly) { CTRACE((tfp, "HTML: OBJECT has USEMAP. Converting to IMG.\n")); if (!include) /* error, should not happen */ include = &me->xinclude; StrAllocCat(*include, "<IMG ISOBJECT"); if (me->object_id != NULL) { /* * Pass the ID. - FM */ StrAllocCat(*include, " ID=\""); StrAllocCat(*include, me->object_id); StrAllocCat(*include, "\""); } if (me->object_data != NULL && me->object_classid == NULL) { /* * We have DATA with no CLASSID, so let's hope it' * equivalent to an SRC. - FM */ StrAllocCat(*include, " SRC=\""); StrAllocCat(*include, me->object_data); StrAllocCat(*include, "\""); } if (me->object_title != NULL) { /* * Use the TITLE for both the MAP and the IMGs ALT. - FM */ StrAllocCat(*include, " TITLE=\""); StrAllocCat(*include, me->object_title); StrAllocCat(*include, "\" ALT=\""); StrAllocCat(*include, me->object_title); StrAllocCat(*include, "\""); } /* * Add the USEMAP, and an ISMAP if present. 
- FM */ if (me->object_usemap != NULL) { StrAllocCat(*include, " USEMAP=\""); StrAllocCat(*include, me->object_usemap); if (me->object_ismap == TRUE) StrAllocCat(*include, "\" ISMAP>"); else StrAllocCat(*include, "\">"); } else { StrAllocCat(*include, ">"); } /* * Add the content if it has <MAP, since that may be the MAP * this usemap points to. But if we have nested objects, try * to eliminate portions that cannot contribute to the quest * for MAP. This is not perfect, we may get too much content; * this seems preferable over losing too much. - kw */ if (first_map) { if (s == 0) { StrAllocCat(*include, me->object.data); CTRACE((tfp, "HTML: MAP found, recycling object contents.\n")); goto End_Object; } /* s > 0 and s == e */ data = NULL; if (last_map < start) { *start = '\0'; i = 0; } else if (last_map < first_end) { *first_end = '\0'; i = e; } else if (last_map < last_end) { *last_end = '\0'; i = 1; } else { i = 0; } if (first_map > last_end) { /* fake empty object to keep stacks stack happy */ StrAllocCopy(data, "<OBJECT><"); StrAllocCat(data, last_end + 1); i = 0; } else if (first_map > start) { StrAllocCopy(data, start); } else { StrAllocCopy(data, me->object.data); } for (; i > 0; i--) { StrAllocCat(data, "</OBJECT>"); } CTRACE((tfp, "%s:\n%s\n", "HTML: MAP and nested OBJECT tags. Recycling parts", data)); StrAllocCat(*include, data); FREE(data); } goto End_Object; } /* * Add an ID link if needed. - FM */ if (non_empty(me->object_id) && !LYMapsOnly) LYHandleID(me, me->object_id); /* * Add the OBJECTs content if not empty. - FM */ if (me->object.size > 1) { if (!include) /* error, should not happen */ include = &me->xinclude; StrAllocCat(*include, me->object.data); } /* * Create a link to the DATA, if desired, and we can rule out that * it involves scripting code. This a risky thing to do, but we * can toggle clickable_images mode off if it really screws things * up, and so we may as well give it a try. 
- FM */ if (clickable_images) { if (!LYMapsOnly && me->object_data != NULL && !have_param && me->object_classid == NULL && me->object_codebase == NULL && me->object_codetype == NULL) { /* * We have a DATA value and no need for scripting code, so * close the current Anchor, if one is open, and add an * Anchor for this source. If we also have a TYPE value, * check whether it's an image or not, and set the link * name accordingly. - FM */ if (!include) /* error, should not happen */ include = &me->xinclude; if (me->inA) StrAllocCat(*include, "</A>"); StrAllocCat(*include, " -<A HREF=\""); StrAllocCat(*include, me->object_data); StrAllocCat(*include, "\">"); if ((me->object_type != NULL) && !strncasecomp(me->object_type, "image/", 6)) { StrAllocCat(*include, "(IMAGE)"); } else { StrAllocCat(*include, "(OBJECT)"); } StrAllocCat(*include, "</A> "); } } } /* * Re-intialize all of the OBJECT elements. - FM */ End_Object: clear_objectdata(me); if (!LYMapsOnly) change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_APPLET: if (me->inAPPLETwithP) { LYEnsureDoubleSpace(me); } else { HTML_put_character(me, ' '); /* space char may be ignored */ } LYResetParagraphAlignment(me); me->inAPPLETwithP = FALSE; me->inAPPLET = FALSE; change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_CAPTION: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); me->inCAPTION = FALSE; change_paragraph_style(me, me->sp->style); /* Often won't really change */ me->inLABEL = FALSE; break; case HTML_CREDIT: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); me->inCREDIT = FALSE; change_paragraph_style(me, me->sp->style); /* Often won't really change */ me->inLABEL = FALSE; break; case HTML_FORM: /* * Check if we had a FORM start tag, and issue a message if not, but * fall through to check for an open SELECT and ensure that the * FORM-related globals in GridText.c are initialized. 
- FM */ if (!me->inFORM) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Unmatched FORM end tag\n"); } } EMIT_IFDEF_USE_JUSTIFY_ELTS(form_in_htext = FALSE); /* * Check if we still have a SELECT element open. FORM may have been * declared SGML_EMPTY in HTMLDTD.c, and in that case SGML_character() * in SGML.c is not able to ensure correct nesting; or it may have * failed to enforce valid nesting. If a SELECT is open, issue a * message, then call HTML_end_element() directly (with a check in that * to bypass decrementing of the HTML parser's stack) to close the * SELECT. - kw */ if (me->inSELECT) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Open SELECT at FORM end. Faking SELECT end tag. *****\n"); } if (me->sp->tag_number != HTML_SELECT) { SET_SKIP_STACK(HTML_SELECT); } HTML_end_element(me, HTML_SELECT, include); } /* * Set to know that we are no longer in an form. */ me->inFORM = FALSE; HText_endForm(me->text); /* * If we are in a list and are on the first line with no text following * a bullet or number, don't force a newline. This could happen if we * were called from HTML_start_element() due to a missing FORM end tag. * - FM */ if (!(me->List_Nesting_Level >= 0 && !me->inP)) LYEnsureSingleSpace(me); break; case HTML_FIELDSET: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_LEGEND: LYEnsureDoubleSpace(me); LYResetParagraphAlignment(me); change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; case HTML_LABEL: break; case HTML_BUTTON: break; case HTML_TEXTAREA: { InputFieldData I; int chars; char *data; /* * Make sure we had a textarea start tag. */ if (!me->inTEXTAREA) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Unmatched TEXTAREA end tag\n"); } break; } /* * Set to know that we are no longer in a textarea tag. */ me->inTEXTAREA = FALSE; /* * Initialize. 
*/ memset(&I, 0, sizeof(I)); I.value_cs = current_char_set; UPDATE_STYLE; /* * Before any input field add a space if necessary. */ HTML_put_character(me, ' '); me->in_word = NO; /* * Add a return. */ HText_appendCharacter(me->text, '\r'); /* * Finish the data off. */ HTChunkTerminate(&me->textarea); FREE(temp); I.type = "textarea"; I.size = me->textarea_cols; I.name = me->textarea_name; I.name_cs = me->textarea_name_cs; I.accept_cs = me->textarea_accept_cs; me->textarea_accept_cs = NULL; I.disabled = me->textarea_disabled; I.readonly = me->textarea_readonly; I.id = me->textarea_id; /* * Transform the TEXTAREA content as needed, then parse it into * individual lines to be handled as a series series of INPUT * fields (ugh!). Any raw 8-bit or multibyte characters already * have been handled in relation to the display character set in * SGML_character(). * * If TEXTAREA is handled as SGML_LITTERAL (the old way), we need * to SGML-unescape any character references and NCRs here. * Otherwise this will already have happened in the SGML.c parsing. * - kw */ me->UsePlainSpace = TRUE; if (HTML_dtd.tags[element_number].contents == SGML_LITTERAL) { TRANSLATE_AND_UNESCAPE_ENTITIES6(&me->textarea.data, me->UCLYhndl, current_char_set, NO, me->UsePlainSpace, me->HiddenValue); } else { /* * This shouldn't have anything to do, normally, but just in * case... There shouldn't be lynx special character codes in * the chunk ("DTD" flag Tgf_nolyspcl tells SGML.c not to * generate them). If there were, we could set the last * parameter ('Back') below to YES, which would take them out * of the data. The data may however contain non break space, * soft hyphen, or en space etc., in the me->UCLYhndl character * encoding. If that's a problem, perhaps for the (line or * other) editor, setting 'Back' to YES should also help to * always convert them to plain spaces (or drop them). 
- kw */ TRANSLATE_HTML7(&me->textarea.data, me->UCLYhndl, current_char_set, NO, me->UsePlainSpace, me->HiddenValue, NO); } data = me->textarea.data; /* * Trim any trailing newlines and skip any lead newlines. - FM */ if (*data != '\0') { cp = (data + strlen(data)) - 1; while (cp >= data && *cp == '\n') { *cp-- = '\0'; } while (*data == '\n') { data++; } } /* * Load the first text line, or set up for all blank rows. - FM */ if ((cp = StrChr(data, '\n')) != NULL) { *cp = '\0'; StrAllocCopy(temp, data); *cp = '\n'; data = (cp + 1); } else { if (*data != '\0') { StrAllocCopy(temp, data); } else { FREE(temp); } data = empty; } /* * Display at least the requested number of text lines and/or blank * rows. - FM */ for (i = 0; i < me->textarea_rows; i++) { int j; for (j = 0; temp && temp[j]; j++) { if (temp[j] == '\r') temp[j] = (char) (temp[j + 1] ? ' ' : '\0'); } I.value = temp; chars = HText_beginInput(me->text, me->inUnderline, &I); for (; chars > 0; chars--) HTML_put_character(me, '_'); HText_appendCharacter(me->text, '\r'); if (*data != '\0') { if (*data == '\n') { FREE(temp); data++; } else if ((cp = StrChr(data, '\n')) != NULL) { *cp = '\0'; StrAllocCopy(temp, data); *cp = '\n'; data = (cp + 1); } else { StrAllocCopy(temp, data); data = empty; } } else { FREE(temp); } } /* * Check for more data lines than the rows attribute. We add them * to the display, because we support only horizontal and not also * vertical scrolling. - FM */ while (*data != '\0' || temp != NULL) { int j; for (j = 0; temp && temp[j]; j++) { if (temp[j] == '\r') temp[j] = (char) (temp[j + 1] ? 
' ' : '\0'); } I.value = temp; (void) HText_beginInput(me->text, me->inUnderline, &I); for (chars = me->textarea_cols; chars > 0; chars--) HTML_put_character(me, '_'); HText_appendCharacter(me->text, '\r'); if (*data == '\n') { FREE(temp); data++; } else if ((cp = StrChr(data, '\n')) != NULL) { *cp = '\0'; StrAllocCopy(temp, data); *cp = '\n'; data = (cp + 1); } else if (*data != '\0') { StrAllocCopy(temp, data); data = empty; } else { FREE(temp); } } FREE(temp); cp = NULL; me->UsePlainSpace = FALSE; HTChunkClear(&me->textarea); FREE(me->textarea_name); me->textarea_name_cs = -1; FREE(me->textarea_id); break; } case HTML_SELECT: { char *ptr = NULL; /* * Make sure we had a select start tag. */ if (!me->inSELECT) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: Unmatched SELECT end tag *****\n"); } break; } /* * Set to know that we are no longer in a select tag. */ me->inSELECT = FALSE; /* * Clear the disable attribute. */ me->select_disabled = FALSE; /* * Make sure we're in a form. */ if (!me->inFORM) { if (LYBadHTML(me)) { LYShowBadHTML("Bad HTML: SELECT end tag not within FORM element *****\n"); } /* * Hopefully won't crash, so we'll ignore it. - kw */ } /* * Finish the data off. */ HTChunkTerminate(&me->option); /* * Finish the previous option. */ if (!me->first_option) ptr = HText_setLastOptionValue(me->text, me->option.data, me->LastOptionValue, LAST_ORDER, me->LastOptionChecked, me->UCLYhndl, ATTR_CS_IN); FREE(me->LastOptionValue); me->LastOptionChecked = FALSE; if (HTCurSelectGroupType == F_CHECKBOX_TYPE || LYSelectPopups == FALSE) { /* * Start a newline after the last checkbox/button option. */ LYEnsureSingleSpace(me); } else { /* * Output popup box with the default option to screen, but use * non-breaking spaces for output. */ if (ptr && (me->sp[0].tag_number == HTML_PRE || me->inPRE == TRUE || !me->sp->style->freeFormat) && strlen(ptr) > 6) { /* * The code inadequately handles OPTION fields in PRE tags. 
* We'll put up a minimum of 6 characters, and if any more * would exceed the wrap column, we'll ignore them. */ for (i = 0; i < 6; i++) { if (*ptr == ' ') HText_appendCharacter(me->text, HT_NON_BREAK_SPACE); else HText_appendCharacter(me->text, *ptr); ptr++; } } for (; non_empty(ptr); ptr++) { if (*ptr == ' ') HText_appendCharacter(me->text, HT_NON_BREAK_SPACE); else { HTkcode kcode = NOKANJI; HTkcode specified_kcode = NOKANJI; if (HTCJK == JAPANESE) { kcode = HText_getKcode(me->text); HText_updateKcode(me->text, kanji_code); specified_kcode = HText_getSpecifiedKcode(me->text); HText_updateSpecifiedKcode(me->text, kanji_code); } HText_appendCharacter(me->text, *ptr); if (HTCJK == JAPANESE) { HText_updateKcode(me->text, kcode); HText_updateSpecifiedKcode(me->text, specified_kcode); } } } /* * Add end option character. */ if (!me->first_option) { HText_appendCharacter(me->text, ']'); HText_endInput(me->text); HText_setLastChar(me->text, ']'); me->in_word = YES; } } HTChunkClear(&me->option); if (me->Underline_Level > 0 && me->inUnderline == FALSE) { HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); me->inUnderline = TRUE; } if (me->needBoldH == TRUE && me->inBoldH == FALSE) { HText_appendCharacter(me->text, LY_BOLD_START_CHAR); me->inBoldH = TRUE; me->needBoldH = FALSE; } } break; case HTML_TABLE: #ifdef EXP_NESTED_TABLES if (!nested_tables) #endif me->inTABLE = FALSE; if (me->sp->style->id == ST_Preformatted) { break; } if (me->Division_Level >= 0) me->Division_Level--; if (me->Division_Level >= 0) me->sp->style->alignment = me->DivisionAlignments[me->Division_Level]; change_paragraph_style(me, me->sp->style); UPDATE_STYLE; #ifdef EXP_NESTED_TABLES if (nested_tables) { me->inTABLE = HText_endStblTABLE(me->text); } else { HText_endStblTABLE(me->text); } #else HText_endStblTABLE(me->text); #endif me->current_default_alignment = me->sp->style->alignment; if (me->List_Nesting_Level >= 0) HText_NegateLineOne(me->text); break; /* These TABLE related elements may 
now not be SGML_EMPTY. - kw */ case HTML_TR: HText_endStblTR(me->text); if (!HText_LastLineEmpty(me->text, FALSE)) { HText_setLastChar(me->text, ' '); /* absorb next white space */ HText_appendCharacter(me->text, '\r'); } me->in_word = NO; break; case HTML_THEAD: case HTML_TFOOT: case HTML_TBODY: break; case HTML_COLGROUP: if (me->inTABLE) HText_endStblCOLGROUP(me->text); break; case HTML_TH: case HTML_TD: HText_endStblTD(me->text); break; /* More stuff that may now not be SGML_EMPTY any more: */ case HTML_DT: case HTML_DD: case HTML_LH: case HTML_LI: case HTML_OVERLAY: break; case HTML_MATH: /* * We're getting it as Literal text, which, until we can process it, * we'll display as is, within brackets to alert the user. - FM */ HTChunkPutc(&me->math, ' '); HTChunkTerminate(&me->math); if (me->math.size > 2) { LYEnsureSingleSpace(me); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, "[MATH:"); HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); HTML_put_character(me, ' '); HTML_put_string(me, me->math.data); HText_appendCharacter(me->text, LY_UNDERLINE_START_CHAR); HTML_put_string(me, ":MATH]"); if (me->inUnderline == FALSE) HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR); LYEnsureSingleSpace(me); } HTChunkClear(&me->math); break; default: change_paragraph_style(me, me->sp->style); /* Often won't really change */ break; } /* switch */ #ifdef USE_JUSTIFY_ELTS if (reached_awaited_stacked_elt) wait_for_this_stacked_elt = -1; #endif if (me->xinclude) { HText_appendText(me->text, " *** LYNX ERROR ***\rUnparsed data:\r"); HText_appendText(me->text, me->xinclude); FREE(me->xinclude); } #ifdef USE_COLOR_STYLE if (!skip_stack_requested) { /*don't emit stylechanges if skipped stack element - VH */ # if !OMIT_SCN_KEEPING FastTrimColorClass(HTML_dtd.tags[element_number].name, HTML_dtd.tags[element_number].name_len, Style_className, &Style_className_end, &hcode); # endif if (!ReallyEmptyTagNum(element_number)) 
{ CTRACE2(TRACE_STYLE, (tfp, "STYLE.end_element: ending non-\"EMPTY\" style <%s...>\n", HTML_dtd.tags[element_number].name)); HText_characterStyle(me->text, HCODE_TO_STACK_OFF(hcode), STACK_OFF); } } #endif /* USE_COLOR_STYLE */ return status; } /* Expanding entities * ------------------ */ /* (In fact, they all shrink!) */ int HTML_put_entity(HTStructured * me, int entity_number) { int nent = (int) HTML_dtd.number_of_entities; if (entity_number < nent) { HTML_put_string(me, p_entity_values[entity_number]); return HT_OK; } return HT_CANNOT_TRANSLATE; } /* Free an HTML object * ------------------- * * If the document is empty, the text object will not yet exist. * So we could in fact abandon creating the document and return * an error code. In fact an empty document is an important type * of document, so we don't. * * If non-interactive, everything is freed off. No: crashes -listrefs * Otherwise, the interactive object is left. */ static void HTML_free(HTStructured * me) { char *include = NULL; if (LYMapsOnly && !me->text) { /* * We only handled MAP, AREA and BASE tags, and didn't create an HText * structure for the document nor want one now, so just make sure we * free anything that might have been allocated. - FM */ FREE(me->base_href); FREE(me->map_address); clear_objectdata(me); FREE(me->xinclude); FREE(me); return; } UPDATE_STYLE; /* Creates empty document here! */ if (me->comment_end) HTML_put_string(me, me->comment_end); if (me->text) { /* * Emphasis containers, A, FONT, and FORM may be declared SGML_EMPTY in * HTMLDTD.c, and SGML_character() in SGML.c may check for their end * tags to call HTML_end_element() directly (with a check in that to * bypass decrementing of the HTML parser's stack). So if we still * have the emphasis (Underline) on, or any open A, FONT, or FORM * containers, turn it off or close them now. 
- FM & kw
	 *
	 * IF those tags are not declared SGML_EMPTY, but we let the SGML.c
	 * parser take care of correctly stacked ordering, and of correct
	 * wind-down on end-of-stream (in SGML_free SGML_abort), THEN these and
	 * other checks here in HTML.c should not be necessary. Still it can't
	 * hurt to include them. - kw
	 */
	if (me->inUnderline) {
	    /* force any dangling underline emphasis closed */
	    HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR);
	    me->inUnderline = FALSE;
	    me->Underline_Level = 0;
	    CTRACE((tfp, "HTML_free: Ending underline\n"));
	}
	if (me->inA) {
	    /* close a still-open anchor via the normal end-tag path */
	    HTML_end_element(me, HTML_A, &include);
	    me->inA = FALSE;
	    CTRACE((tfp, "HTML_free: Ending HTML_A\n"));
	}
	if (me->inFONT) {
	    HTML_end_element(me, HTML_FONT, &include);
	    me->inFONT = FALSE;
	}
	if (me->inFORM) {
	    HTML_end_element(me, HTML_FORM, &include);
	    me->inFORM = FALSE;
	}
	if (me->option.size > 0) {
	    /*
	     * If we still have data in the me->option chunk after forcing a
	     * close of a still-open form, something must have gone very wrong.
	     * - kw
	     */
	    if (LYBadHTML(me)) {
		LYShowBadHTML("Bad HTML: SELECT or OPTION not ended properly *****\n");
	    }
	    HTChunkTerminate(&me->option);
	    /*
	     * Output the left-over data as text, maybe it was invalid markup
	     * meant to be shown somewhere. - kw
	     */
	    CTRACE((tfp, "HTML_free: ***** leftover option data: %s\n",
		    me->option.data));
	    HTML_put_string(me, me->option.data);
	    HTChunkClear(&me->option);
	}
	if (me->textarea.size > 0) {
	    /*
	     * If we still have data in the me->textarea chunk after forcing a
	     * close of a still-open form, something must have gone very wrong.
	     * - kw
	     */
	    if (LYBadHTML(me)) {
		LYShowBadHTML("Bad HTML: TEXTAREA not used properly *****\n");
	    }
	    HTChunkTerminate(&me->textarea);
	    /*
	     * Output the left-over data as text, maybe it was invalid markup
	     * meant to be shown somewhere.
	     * - kw
	     */
	    CTRACE((tfp, "HTML_free: ***** leftover textarea data: %s\n",
		    me->textarea.data));
	    HTML_put_string(me, me->textarea.data);
	    HTChunkClear(&me->textarea);
	}
	/*
	 * If we're interactive and have hidden links but no visible links, add
	 * a message informing the user about this and suggesting use of the
	 * 'l'ist command. - FM
	 */
	if (!dump_output_immediately &&
	    HText_sourceAnchors(me->text) < 1 &&
	    HText_HiddenLinkCount(me->text) > 0) {
	    HTML_start_element(me, HTML_P, 0, 0, -1, &include);
	    HTML_put_character(me, '[');
	    HTML_start_element(me, HTML_EM, 0, 0, -1, &include);
	    HTML_put_string(me,
			    gettext("Document has only hidden links. Use the 'l'ist command."));
	    HTML_end_element(me, HTML_EM, &include);
	    HTML_put_character(me, ']');
	    HTML_end_element(me, HTML_P, &include);
	}
	if (me->xinclude) {
	    /* flush any unparsed recycled data so it is not silently lost */
	    HText_appendText(me->text, " *** LYNX ERROR ***\rUnparsed data:\r");
	    HText_appendText(me->text, me->xinclude);
	    FREE(me->xinclude);
	}
	/*
	 * Now call the cleanup function. - FM
	 */
	HText_endAppend(me->text);
    }
    if (me->option.size > 0) {
	/*
	 * If we still have data in the me->option chunk after forcing a close
	 * of a still-open form, something must have gone very wrong. - kw
	 */
	if (LYBadHTML(me)) {
	    LYShowBadHTML("Bad HTML: SELECT or OPTION not ended properly *****\n");
	}
	if (TRACE) {
	    HTChunkTerminate(&me->option);
	    CTRACE((tfp, "HTML_free: ***** leftover option data: %s\n",
		    me->option.data));
	}
	HTChunkClear(&me->option);
    }
    if (me->textarea.size > 0) {
	/*
	 * If we still have data in the me->textarea chunk after forcing a
	 * close of a still-open form, something must have gone very wrong. -
	 * kw
	 */
	if (LYBadHTML(me)) {
	    LYShowBadHTML("Bad HTML: TEXTAREA not used properly *****\n");
	}
	if (TRACE) {
	    HTChunkTerminate(&me->textarea);
	    CTRACE((tfp, "HTML_free: ***** leftover textarea data: %s\n",
		    me->textarea.data));
	}
	HTChunkClear(&me->textarea);
    }
    if (me->target) {
	/* release the search-target object through its class destructor */
	(*me->targetClass._free) (me->target);
    }
    if (me->sp && me->sp->style && GetHTStyleName(me->sp->style)) {
	/*
	 * NOTE(review): these style structures appear to be shared state
	 * mutated during parsing; the assignments below presumably restore
	 * default alignments for the next document — confirm against the
	 * styles[] definition elsewhere in the file.
	 */
	if (me->sp->style->id == ST_DivCenter ||
	    me->sp->style->id == ST_HeadingCenter ||
	    me->sp->style->id == ST_Heading1) {
	    me->sp->style->alignment = HT_CENTER;
	} else if (me->sp->style->id == ST_DivRight ||
		   me->sp->style->id == ST_HeadingRight) {
	    me->sp->style->alignment = HT_RIGHT;
	} else {
	    me->sp->style->alignment = HT_LEFT;
	}
	styles[HTML_PRE]->alignment = HT_LEFT;
    }
    /* free everything the renderer object owns, then the object itself */
    FREE(me->base_href);
    FREE(me->map_address);
    FREE(me->LastOptionValue);
    clear_objectdata(me);
    FREE(me);
}

static void HTML_abort(HTStructured * me, HTError e)
{
    char *include = NULL;

    if (me->text) {
	/*
	 * If we have emphasis on, or open A, FONT, or FORM containers, turn it
	 * off or close them now. - FM
	 */
	if (me->inUnderline) {
	    HText_appendCharacter(me->text, LY_UNDERLINE_END_CHAR);
	    me->inUnderline = FALSE;
	    me->Underline_Level = 0;
	}
	if (me->inA) {
	    HTML_end_element(me, HTML_A, &include);
	    me->inA = FALSE;
	}
	if (me->inFONT) {
	    HTML_end_element(me, HTML_FONT, &include);
	    me->inFONT = FALSE;
	}
	if (me->inFORM) {
	    HTML_end_element(me, HTML_FORM, &include);
	    me->inFORM = FALSE;
	}
	/*
	 * Now call the cleanup function. - FM
	 */
	HText_endAppend(me->text);
    }
    if (me->option.size > 0) {
	/*
	 * If we still have data in the me->option chunk after forcing a close
	 * of a still-open form, something must have gone very wrong.
- kw */ if (TRACE) { CTRACE((tfp, "HTML_abort: SELECT or OPTION not ended properly *****\n")); HTChunkTerminate(&me->option); CTRACE((tfp, "HTML_abort: ***** leftover option data: %s\n", me->option.data)); } HTChunkClear(&me->option); } if (me->textarea.size > 0) { /* * If we still have data in the me->textarea chunk after forcing a * close of a still-open form, something must have gone very wrong. - * kw */ if (TRACE) { CTRACE((tfp, "HTML_abort: TEXTAREA not used properly *****\n")); HTChunkTerminate(&me->textarea); CTRACE((tfp, "HTML_abort: ***** leftover textarea data: %s\n", me->textarea.data)); } HTChunkClear(&me->textarea); } if (me->target) { (*me->targetClass._abort) (me->target, e); } if (me->sp && me->sp->style && GetHTStyleName(me->sp->style)) { if (me->sp->style->id == ST_DivCenter || me->sp->style->id == ST_HeadingCenter || me->sp->style->id == ST_Heading1) { me->sp->style->alignment = HT_CENTER; } else if (me->sp->style->id == ST_DivRight || me->sp->style->id == ST_HeadingRight) { me->sp->style->alignment = HT_RIGHT; } else { me->sp->style->alignment = HT_LEFT; } styles[HTML_PRE]->alignment = HT_LEFT; } FREE(me->base_href); FREE(me->map_address); FREE(me->textarea_name); FREE(me->textarea_accept_cs); FREE(me->textarea_id); FREE(me->LastOptionValue); FREE(me->xinclude); clear_objectdata(me); FREE(me); } /* Get Styles from style sheet * --------------------------- */ static void get_styles(void) { HTStyle **st = NULL; styleSheet = DefaultStyle(&st); /* sets st[] array */ default_style = st[ST_Normal]; styles[HTML_H1] = st[ST_Heading1]; styles[HTML_H2] = st[ST_Heading2]; styles[HTML_H3] = st[ST_Heading3]; styles[HTML_H4] = st[ST_Heading4]; styles[HTML_H5] = st[ST_Heading5]; styles[HTML_H6] = st[ST_Heading6]; styles[HTML_HCENTER] = st[ST_HeadingCenter]; styles[HTML_HLEFT] = st[ST_HeadingLeft]; styles[HTML_HRIGHT] = st[ST_HeadingRight]; styles[HTML_DCENTER] = st[ST_DivCenter]; styles[HTML_DLEFT] = st[ST_DivLeft]; styles[HTML_DRIGHT] = st[ST_DivRight]; 
styles[HTML_DL] = st[ST_Glossary]; /* nested list styles */ styles[HTML_DL1] = st[ST_Glossary1]; styles[HTML_DL2] = st[ST_Glossary2]; styles[HTML_DL3] = st[ST_Glossary3]; styles[HTML_DL4] = st[ST_Glossary4]; styles[HTML_DL5] = st[ST_Glossary5]; styles[HTML_DL6] = st[ST_Glossary6]; styles[HTML_UL] = styles[HTML_OL] = st[ST_List]; /* nested list styles */ styles[HTML_OL1] = st[ST_List1]; styles[HTML_OL2] = st[ST_List2]; styles[HTML_OL3] = st[ST_List3]; styles[HTML_OL4] = st[ST_List4]; styles[HTML_OL5] = st[ST_List5]; styles[HTML_OL6] = st[ST_List6]; styles[HTML_MENU] = styles[HTML_DIR] = st[ST_Menu]; /* nested list styles */ styles[HTML_MENU1] = st[ST_Menu1]; styles[HTML_MENU2] = st[ST_Menu2]; styles[HTML_MENU3] = st[ST_Menu3]; styles[HTML_MENU4] = st[ST_Menu4]; styles[HTML_MENU5] = st[ST_Menu5]; styles[HTML_MENU6] = st[ST_Menu6]; styles[HTML_DLC] = st[ST_GlossaryCompact]; /* nested list styles */ styles[HTML_DLC1] = st[ST_GlossaryCompact1]; styles[HTML_DLC2] = st[ST_GlossaryCompact2]; styles[HTML_DLC3] = st[ST_GlossaryCompact3]; styles[HTML_DLC4] = st[ST_GlossaryCompact4]; styles[HTML_DLC5] = st[ST_GlossaryCompact5]; styles[HTML_DLC6] = st[ST_GlossaryCompact6]; styles[HTML_ADDRESS] = st[ST_Address]; styles[HTML_BANNER] = st[ST_Banner]; styles[HTML_BLOCKQUOTE] = st[ST_Blockquote]; styles[HTML_BQ] = st[ST_Bq]; styles[HTML_FN] = st[ST_Footnote]; styles[HTML_NOTE] = st[ST_Note]; styles[HTML_PLAINTEXT] = styles[HTML_XMP] = st[ST_Example]; styles[HTML_PRE] = st[ST_Preformatted]; styles[HTML_LISTING] = st[ST_Listing]; } /* * If we're called from another module, make sure we've initialized styles * array first. 
*/ HTStyle *LYstyles(int style_number) { if (styles[style_number] == 0) get_styles(); return styles[style_number]; } /* P U B L I C */ /* Structured Object Class * ----------------------- */ const HTStructuredClass HTMLPresentation = /* As opposed to print etc */ { "Lynx_HTML_Handler", HTML_free, HTML_abort, HTML_put_character, HTML_put_string, HTML_write, HTML_start_element, HTML_end_element, HTML_put_entity }; /* New Structured Text object * -------------------------- * * The structured stream can generate either presentation, * or plain text, or HTML. */ HTStructured *HTML_new(HTParentAnchor *anchor, HTFormat format_out, HTStream *stream) { HTStructured *me; CTRACE((tfp, "start HTML_new\n")); if (format_out != WWW_PLAINTEXT && format_out != WWW_PRESENT) { HTStream *intermediate = HTStreamStack(WWW_HTML, format_out, stream, anchor); if (intermediate) return HTMLGenerator(intermediate); fprintf(stderr, "\n** Internal error: can't parse HTML to %s\n", HTAtom_name(format_out)); exit_immediately(EXIT_FAILURE); } me = typecalloc(HTStructured); if (me == NULL) outofmem(__FILE__, "HTML_new"); /* * This used to call 'get_styles()' only on the first time through this * function. However, if the user reloads a page with ^R, the styles[] * array is not necessarily the same as it was from 'get_styles()'. So * we reinitialize the whole thing. 
*/ get_styles(); me->isa = &HTMLPresentation; me->node_anchor = anchor; me->CurrentA = NULL; me->CurrentANum = 0; me->base_href = NULL; me->map_address = NULL; HTChunkInit(&me->title, 128); HTChunkInit(&me->object, 128); me->object_started = FALSE; me->object_declare = FALSE; me->object_shapes = FALSE; me->object_ismap = FALSE; me->object_id = NULL; me->object_title = NULL; me->object_data = NULL; me->object_type = NULL; me->object_classid = NULL; me->object_codebase = NULL; me->object_codetype = NULL; me->object_usemap = NULL; me->object_name = NULL; HTChunkInit(&me->option, 128); me->first_option = TRUE; me->LastOptionValue = NULL; me->LastOptionChecked = FALSE; me->select_disabled = FALSE; HTChunkInit(&me->textarea, 128); me->textarea_name = NULL; me->textarea_name_cs = -1; me->textarea_accept_cs = NULL; me->textarea_cols = 0; me->textarea_rows = 4; me->textarea_id = NULL; HTChunkInit(&me->math, 128); HTChunkInit(&me->style_block, 128); HTChunkInit(&me->script, 128); me->text = 0; me->style_change = YES; /* Force check leading to text creation */ me->new_style = default_style; me->old_style = 0; me->current_default_alignment = HT_LEFT; me->sp = (me->stack + MAX_NESTING - 1); me->skip_stack = 0; me->sp->tag_number = -1; /* INVALID */ me->sp->style = default_style; /* INVALID */ me->sp->style->alignment = HT_LEFT; me->stack_overrun = FALSE; me->Division_Level = -1; me->Underline_Level = 0; me->Quote_Level = 0; me->UsePlainSpace = FALSE; me->HiddenValue = FALSE; me->lastraw = -1; /* * Used for nested lists. 
- FM */ me->List_Nesting_Level = -1; /* counter for list nesting level */ LYZero_OL_Counter(me); /* Initializes OL_Counter[] and OL_Type[] */ me->Last_OL_Count = 0; /* last count in ordered lists */ me->Last_OL_Type = '1'; /* last type in ordered lists */ me->inA = FALSE; me->inAPPLET = FALSE; me->inAPPLETwithP = FALSE; me->inBadBASE = FALSE; me->inBadHREF = FALSE; me->inBadHTML = FALSE; me->inBASE = FALSE; me->node_anchor->inBASE = FALSE; me->inBoldA = FALSE; me->inBoldH = FALSE; me->inCAPTION = FALSE; me->inCREDIT = FALSE; me->inFIG = FALSE; me->inFIGwithP = FALSE; me->inFONT = FALSE; me->inFORM = FALSE; me->inLABEL = FALSE; me->inP = FALSE; me->inPRE = FALSE; me->inSELECT = FALSE; me->inTABLE = FALSE; me->inUnderline = FALSE; me->needBoldH = FALSE; me->comment_start = NULL; me->comment_end = NULL; #ifdef USE_COLOR_STYLE #ifdef LY_FIND_LEAKS if (Style_className == 0) { atexit(free_Style_className); } #endif addClassName("", "", (size_t) 0); class_string[0] = '\0'; #endif /* * Create a chartrans stage info structure for the anchor, if it does not * exist already (in which case the default MIME stage info will be loaded * as well), and load the HTML stage info into me->UCI and me->UCLYhndl. - * FM */ LYGetChartransInfo(me); UCTransParams_clear(&me->T); /* * Load the existing or default input charset info into the holding * elements. We'll believe what is indicated for UCT_STAGE_PARSER. - FM */ me->inUCLYhndl = HTAnchor_getUCLYhndl(me->node_anchor, UCT_STAGE_PARSER); if (me->inUCLYhndl < 0) { me->inUCLYhndl = HTAnchor_getUCLYhndl(me->node_anchor, UCT_STAGE_MIME); me->inUCI = HTAnchor_getUCInfoStage(me->node_anchor, UCT_STAGE_MIME); } else { me->inUCI = HTAnchor_getUCInfoStage(me->node_anchor, UCT_STAGE_PARSER); } /* * Load the existing or default output charset info into the holding * elements, UCT_STAGE_STRUCTURED should be the same as UCT_STAGE_TEXT at * this point, but we could check, perhaps. 
- FM */ me->outUCI = HTAnchor_getUCInfoStage(me->node_anchor, UCT_STAGE_STRUCTURED); me->outUCLYhndl = HTAnchor_getUCLYhndl(me->node_anchor, UCT_STAGE_STRUCTURED); me->target = stream; if (stream) me->targetClass = *stream->isa; /* Copy pointers */ return (HTStructured *) me; } #ifdef USE_SOURCE_CACHE /* * A flag set by a file write error. Used for only generating an alert the * first time such an error happens, since Lynx should still be usable if the * temp space becomes full, and an alert each time a cache file cannot be * written would be annoying. Reset when lynx.cfg is being reloaded (user may * change SOURCE_CACHE setting). - kw */ BOOLEAN source_cache_file_error = FALSE; /* * Pass-thru cache HTStream */ static void CacheThru_do_free(HTStream *me) { if (me->anchor->source_cache_file) { CTRACE((tfp, "SourceCacheWriter: Removing previous file %s\n", me->anchor->source_cache_file)); (void) LYRemoveTemp(me->anchor->source_cache_file); FREE(me->anchor->source_cache_file); } if (me->anchor->source_cache_chunk) { CTRACE((tfp, "SourceCacheWriter: Removing previous memory chunk %p\n", (void *) me->anchor->source_cache_chunk)); HTChunkFree(me->anchor->source_cache_chunk); me->anchor->source_cache_chunk = NULL; } if (me->fp) { fflush(me->fp); if (ferror(me->fp)) me->status = HT_ERROR; LYCloseTempFP(me->fp); if (me->status == HT_OK) { char *cp_freeme = 0; me->anchor->source_cache_file = me->filename; CTRACE((tfp, "SourceCacheWriter: Committing file %s for URL %s to anchor\n", me->filename, cp_freeme = HTAnchor_address((HTAnchor *) me->anchor))); FREE(cp_freeme); } else { if (source_cache_file_error == FALSE) { HTAlert(gettext("Source cache error - disk full?")); source_cache_file_error = TRUE; } (void) LYRemoveTemp(me->filename); me->anchor->source_cache_file = NULL; } } else if (me->status != HT_OK) { if (me->chunk) { CTRACE((tfp, "SourceCacheWriter: memory chunk %p had errors.\n", (void *) me->chunk)); HTChunkFree(me->chunk); me->chunk = me->last_chunk = NULL; } 
HTAlert(gettext("Source cache error - not enough memory!")); } if (me->chunk) { char *cp_freeme = NULL; me->anchor->source_cache_chunk = me->chunk; CTRACE((tfp, "SourceCacheWriter: Committing memory chunk %p for URL %s to anchor\n", (void *) me->chunk, cp_freeme = HTAnchor_address((HTAnchor *) me->anchor))); FREE(cp_freeme); } } static void CacheThru_free(HTStream *me) { CacheThru_do_free(me); (*me->actions->_free) (me->target); FREE(me); } static void CacheThru_abort(HTStream *me, HTError e) { if (me->fp) LYCloseTempFP(me->fp); if (LYCacheSourceForAborted == SOURCE_CACHE_FOR_ABORTED_DROP) { if (me->filename) { CTRACE((tfp, "SourceCacheWriter: Removing active file %s\n", me->filename)); (void) LYRemoveTemp(me->filename); FREE(me->filename); } if (me->chunk) { CTRACE((tfp, "SourceCacheWriter: Removing active memory chunk %p\n", (void *) me->chunk)); HTChunkFree(me->chunk); } } else { me->status = HT_OK; /*fake it */ CacheThru_do_free(me); } (*me->actions->_abort) (me->target, e); FREE(me); } /* * FIXME: never used! */ static void CacheThru_put_character(HTStream *me, int c_in) { if (me->status == HT_OK) { if (me->fp) { fputc(c_in, me->fp); } else if (me->chunk) { me->last_chunk = HTChunkPutc2(me->last_chunk, c_in); if (me->last_chunk == NULL || me->last_chunk->allocated == 0) me->status = HT_ERROR; } } (*me->actions->put_character) (me->target, c_in); } /* * FIXME: never used! 
*/ static void CacheThru_put_string(HTStream *me, const char *str) { if (me->status == HT_OK) { if (me->fp) { fputs(str, me->fp); } else if (me->chunk) { me->last_chunk = HTChunkPuts2(me->last_chunk, str); if (me->last_chunk == NULL || me->last_chunk->allocated == 0) me->status = HT_ERROR; } } (*me->actions->put_string) (me->target, str); } static void CacheThru_write(HTStream *me, const char *str, int l) { if (me->status == HT_OK && l != 0) { if (me->fp) { if (fwrite(str, (size_t) 1, (size_t) l, me->fp) < (size_t) l || ferror(me->fp)) { me->status = HT_ERROR; } } else if (me->chunk) { me->last_chunk = HTChunkPutb2(me->last_chunk, str, l); if (me->last_chunk == NULL || me->last_chunk->allocated == 0) me->status = HT_ERROR; } } (*me->actions->put_block) (me->target, str, l); } static const HTStreamClass PassThruCache = { "PassThruCache", CacheThru_free, CacheThru_abort, CacheThru_put_character, CacheThru_put_string, CacheThru_write }; static HTStream *CacheThru_new(HTParentAnchor *anchor, HTStream *target) { char *cp_freeme = NULL; char filename[LY_MAXPATH]; HTStream *stream = NULL; HTProtocol *p = (HTProtocol *) anchor->protocol; /* * Neatly and transparently vanish if source caching is disabled. 
*/ if (LYCacheSource == SOURCE_CACHE_NONE) return target; #ifndef DEBUG_SOURCE_CACHE /* Only remote HTML documents may benefit from HTreparse_document(), */ /* oh, assume http protocol: */ if (strcmp(p->name, "http") != 0 && strcmp(p->name, "https") != 0) { CTRACE((tfp, "SourceCacheWriter: Protocol is \"%s\"; not cached\n", p->name)); return target; } #else /* all HTStreams will be cached */ #endif CTRACE((tfp, "start CacheThru_new\n")); stream = (HTStream *) malloc(sizeof(*stream)); if (!stream) outofmem(__FILE__, "CacheThru_new"); stream->isa = &PassThruCache; stream->anchor = anchor; stream->fp = NULL; stream->filename = NULL; stream->chunk = NULL; stream->target = target; stream->actions = target->isa; stream->status = HT_OK; if (LYCacheSource == SOURCE_CACHE_FILE) { if (anchor->source_cache_file) { CTRACE((tfp, "SourceCacheWriter: If successful, will replace source cache file %s\n", anchor->source_cache_file)); } /* * We open the temp file in binary mode to make sure that * end-of-line stuff and high-bit Latin-1 (or other) characters * don't get munged; this way, the file should (knock on wood) * contain exactly what came in from the network. */ if (!(stream->fp = LYOpenTemp(filename, HTML_SUFFIX, BIN_W))) { CTRACE((tfp, "SourceCacheWriter: Cannot open source cache file for URL %s\n", cp_freeme = HTAnchor_address((HTAnchor *) anchor))); FREE(stream); FREE(cp_freeme); return target; } StrAllocCopy(stream->filename, filename); CTRACE((tfp, "SourceCacheWriter: Caching source for URL %s in file %s\n", cp_freeme = HTAnchor_address((HTAnchor *) anchor), filename)); FREE(cp_freeme); } if (LYCacheSource == SOURCE_CACHE_MEMORY) { if (anchor->source_cache_chunk) { CTRACE((tfp, "SourceCacheWriter: If successful, will replace memory chunk %p\n", (void *) anchor->source_cache_chunk)); } stream->chunk = stream->last_chunk = HTChunkCreateMayFail(4096, 1); if (!stream->chunk) /* failed already? pretty bad... 
- kw */ stream->status = HT_ERROR; CTRACE((tfp, "SourceCacheWriter: Caching source for URL %s in memory chunk %p\n", cp_freeme = HTAnchor_address((HTAnchor *) anchor), (void *) stream->chunk)); FREE(cp_freeme); } return stream; } #else #define CacheThru_new(anchor, target) target #endif /* HTConverter for HTML to plain text * ---------------------------------- * * This will convert from HTML to presentation or plain text. * * It is registered in HTInit.c, but never actually used by lynx. * - kw 1999-03-15 */ HTStream *HTMLToPlain(HTPresentation *pres, HTParentAnchor *anchor, HTStream *sink) { CTRACE((tfp, "HTMLToPlain calling CacheThru_new\n")); return CacheThru_new(anchor, SGML_new(&HTML_dtd, anchor, HTML_new(anchor, pres->rep_out, sink))); } /* HTConverter for HTML source to plain text * ----------------------------------------- * * This will preparse HTML and convert back to presentation or plain text. * * It is registered in HTInit.c and used by lynx if invoked with * -preparsed. The stream generated here will be fed with HTML text, * It feeds that to the SGML.c parser, which in turn feeds an HTMLGen.c * structured stream for regenerating flat text; the latter should * end up being handled as text/plain. - kw */ HTStream *HTMLParsedPresent(HTPresentation *pres, HTParentAnchor *anchor, HTStream *sink) { HTStream *intermediate = sink; if (!intermediate) { /* * Trick to prevent HTPlainPresent from translating again. Temporarily * change UCT_STAGE_PARSER setting in anchor while the HTPlain stream * is initialized, so that HTPlain sees its input and output charsets * as the same. 
- kw */ int old_parser_cset = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_PARSER); int structured_cset = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_STRUCTURED); if (structured_cset < 0) structured_cset = HTAnchor_getUCLYhndl(anchor, UCT_STAGE_HTEXT); if (structured_cset < 0) structured_cset = current_char_set; HTAnchor_setUCInfoStage(anchor, structured_cset, UCT_STAGE_PARSER, UCT_SETBY_MIME); if (pres->rep_out == WWW_SOURCE) { /* same effect as intermediate = HTPlainPresent(pres, anchor, NULL); just written in a more general way: */ intermediate = HTStreamStack(WWW_PLAINTEXT, WWW_PRESENT, NULL, anchor); } else { /* this too should amount to calling HTPlainPresent: */ intermediate = HTStreamStack(WWW_PLAINTEXT, pres->rep_out, NULL, anchor); } if (old_parser_cset != structured_cset) { HTAnchor_resetUCInfoStage(anchor, old_parser_cset, UCT_STAGE_PARSER, UCT_SETBY_NONE); if (old_parser_cset >= 0) { HTAnchor_setUCInfoStage(anchor, old_parser_cset, UCT_STAGE_PARSER, UCT_SETBY_DEFAULT + 1); } } } if (!intermediate) return NULL; CTRACE((tfp, "HTMLParsedPresent calling CacheThru_new\n")); return CacheThru_new(anchor, SGML_new(&HTML_dtd, anchor, HTMLGenerator(intermediate))); } /* HTConverter for HTML to C code * ------------------------------ * * C code is like plain text but all non-preformatted code * is commented out. * This will convert from HTML to presentation or plain text. * * It is registered in HTInit.c, but normally not used by lynx. 
* - kw 1999-03-15 */ HTStream *HTMLToC(HTPresentation *pres GCC_UNUSED, HTParentAnchor *anchor, HTStream *sink) { HTStructured *html; if (sink) (*sink->isa->put_string) (sink, "/* "); /* Before even title */ html = HTML_new(anchor, WWW_PLAINTEXT, sink); html->comment_start = "/* "; html->comment_end = " */\n"; /* Must start in col 1 for cpp */ if (!sink) HTML_put_string(html, html->comment_start); CTRACE((tfp, "HTMLToC calling CacheThru_new\n")); return CacheThru_new(anchor, SGML_new(&HTML_dtd, anchor, html)); } /* Presenter for HTML * ------------------ * * This will convert from HTML to presentation or plain text. * * (Comment from original libwww:) * Override this if you have a windows version */ #ifndef GUI HTStream *HTMLPresent(HTPresentation *pres GCC_UNUSED, HTParentAnchor *anchor, HTStream *sink GCC_UNUSED) { CTRACE((tfp, "HTMLPresent calling CacheThru_new\n")); return CacheThru_new(anchor, SGML_new(&HTML_dtd, anchor, HTML_new(anchor, WWW_PRESENT, NULL))); } #endif /* !GUI */ /* (Comments from original libwww:) */ /* Record error message as a hypertext object * ------------------------------------------ * * The error message should be marked as an error so that * it can be reloaded later. * This implementation just throws up an error message * and leaves the document unloaded. * A smarter implementation would load an error document, * marking at such so that it is retried on reload. * * On entry, * sink is a stream to the output device if any * number is the HTTP error number * message is the human readable message. * * On exit, * returns a negative number to indicate lack of success in the load. */ /* (We don't actually do any of that hypertext stuff for errors, the trivial implementation for lynx just generates a message and returns. 
- kw 1999-03-15) */ int HTLoadError(HTStream *sink GCC_UNUSED, int number, const char *message) { HTAlert(message); /* @@@@@@@@@@@@@@@@@@@ */ return -number; } static char *MakeNewTitle(STRING2PTR value, int src_type) { char *ptr; char *newtitle = NULL; StrAllocCopy(newtitle, "["); if (value != 0 && value[src_type] != 0) { ptr = strrchr(value[src_type], '/'); if (!ptr) { StrAllocCat(newtitle, value[src_type]); } else { StrAllocCat(newtitle, ptr + 1); } } else { ptr = 0; } #ifdef SH_EX /* 1998/04/02 (Thu) 16:02:00 */ /* for proxy server 1998/12/19 (Sat) 11:53:30 */ if (AS_casecomp(newtitle + 1, "internal-gopher-menu") == 0) { StrAllocCopy(newtitle, "+"); } else if (AS_casecomp(newtitle + 1, "internal-gopher-unknown") == 0) { StrAllocCopy(newtitle, " "); } else { /* normal title */ ptr = strrchr(newtitle, '.'); if (ptr) { if (AS_casecomp(ptr, ".gif") == 0) *ptr = '\0'; else if (AS_casecomp(ptr, ".jpg") == 0) *ptr = '\0'; else if (AS_casecomp(ptr, ".jpeg") == 0) *ptr = '\0'; } StrAllocCat(newtitle, "]"); } #else StrAllocCat(newtitle, "]"); #endif return newtitle; } static char *MakeNewImageValue(STRING2PTR value) { char *ptr; char *newtitle = NULL; StrAllocCopy(newtitle, "["); ptr = (value[HTML_INPUT_SRC] ? strrchr(value[HTML_INPUT_SRC], '/') : 0); if (!ptr) { StrAllocCat(newtitle, value[HTML_INPUT_SRC]); } else { StrAllocCat(newtitle, ptr + 1); } StrAllocCat(newtitle, "]-Submit"); return newtitle; } static char *MakeNewMapValue(STRING2PTR value, const char *mapstr) { char *ptr; char *newtitle = NULL; StrAllocCopy(newtitle, "["); StrAllocCat(newtitle, mapstr); /* ISMAP or USEMAP */ if (verbose_img && non_empty(value[HTML_IMG_SRC])) { StrAllocCat(newtitle, ":"); ptr = strrchr(value[HTML_IMG_SRC], '/'); if (!ptr) { StrAllocCat(newtitle, value[HTML_IMG_SRC]); } else { StrAllocCat(newtitle, ptr + 1); } } StrAllocCat(newtitle, "]"); return newtitle; }
./CrossVul/dataset_final_sorted/CWE-416/c/good_2515_2
crossvul-cpp_data_good_3348_0
/* Copyright (c) 2013. The YARA Authors. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This module implements a structure I've called "arena". An arena is a data container composed of a set of pages. The arena grows automatically when needed by adding new pages to hold new data. Arenas can be saved and loaded from files. 
*/ #include <string.h> #include <assert.h> #include <stdlib.h> #include <stdarg.h> #include <stddef.h> #include <time.h> #include <yara/arena.h> #include <yara/mem.h> #include <yara/error.h> #include <yara/limits.h> #pragma pack(push) #pragma pack(1) typedef struct _ARENA_FILE_HEADER { char magic[4]; uint32_t size; uint32_t version; } ARENA_FILE_HEADER; #pragma pack(pop) #define free_space(page) \ ((page)->size - (page)->used) // // _yr_arena_new_page // // Creates a new arena page of a given size // // Args: // size_t size - Size of the page // // Returns: // A pointer to the newly created YR_ARENA_PAGE structure // YR_ARENA_PAGE* _yr_arena_new_page( size_t size) { YR_ARENA_PAGE* new_page; new_page = (YR_ARENA_PAGE*) yr_malloc(sizeof(YR_ARENA_PAGE)); if (new_page == NULL) return NULL; new_page->address = (uint8_t*) yr_malloc(size); if (new_page->address == NULL) { yr_free(new_page); return NULL; } new_page->size = size; new_page->used = 0; new_page->next = NULL; new_page->prev = NULL; new_page->reloc_list_head = NULL; new_page->reloc_list_tail = NULL; return new_page; } // // _yr_arena_page_for_address // // Returns the page within the arena where an address reside. // // Args: // YR_ARENA* arena - Pointer to the arena // void* address - Address to be located // // Returns: // A pointer the corresponding YR_ARENA_PAGE structure where the address // resides. // YR_ARENA_PAGE* _yr_arena_page_for_address( YR_ARENA* arena, void* address) { YR_ARENA_PAGE* page; // Most of the times this function is called with an address within // the current page, let's check the current page first to avoid // looping through the page list. 
page = arena->current_page; if (page != NULL && (uint8_t*) address >= page->address && (uint8_t*) address < page->address + page->used) return page; page = arena->page_list_head; while (page != NULL) { if ((uint8_t*) address >= page->address && (uint8_t*) address < page->address + page->used) return page; page = page->next; } return NULL; } // // _yr_arena_make_relocatable // // Tells the arena that certain addresses contains a relocatable pointer. // // Args: // YR_ARENA* arena - Pointer the arena // void* address - Base address // va_list offsets - List of offsets relative to base address // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int _yr_arena_make_relocatable( YR_ARENA* arena, void* base, va_list offsets) { YR_RELOC* reloc; YR_ARENA_PAGE* page; size_t offset; size_t base_offset; int result = ERROR_SUCCESS; page = _yr_arena_page_for_address(arena, base); assert(page != NULL); base_offset = (uint8_t*) base - page->address; offset = va_arg(offsets, size_t); while (offset != -1) { assert(page->used >= sizeof(int64_t)); assert(base_offset + offset <= page->used - sizeof(int64_t)); reloc = (YR_RELOC*) yr_malloc(sizeof(YR_RELOC)); if (reloc == NULL) return ERROR_INSUFFICIENT_MEMORY; reloc->offset = (uint32_t) (base_offset + offset); reloc->next = NULL; if (page->reloc_list_head == NULL) page->reloc_list_head = reloc; if (page->reloc_list_tail != NULL) page->reloc_list_tail->next = reloc; page->reloc_list_tail = reloc; offset = va_arg(offsets, size_t); } return result; } // // yr_arena_create // // Creates a new arena. // // Args: // size_t initial_size - Initial size // int flags - Flags // YR_ARENA** arena - Address where a pointer to the new arena will be // written to. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
// int yr_arena_create( size_t initial_size, int flags, YR_ARENA** arena) { YR_ARENA* new_arena; YR_ARENA_PAGE* new_page; *arena = NULL; new_arena = (YR_ARENA*) yr_malloc(sizeof(YR_ARENA)); if (new_arena == NULL) return ERROR_INSUFFICIENT_MEMORY; new_page = _yr_arena_new_page(initial_size); if (new_page == NULL) { yr_free(new_arena); return ERROR_INSUFFICIENT_MEMORY; } new_arena->page_list_head = new_page; new_arena->current_page = new_page; new_arena->flags = flags | ARENA_FLAGS_COALESCED; *arena = new_arena; return ERROR_SUCCESS; } // // yr_arena_destroy // // Destroys an arena releasing its resource. // // Args: // YR_ARENA* arena - Pointer to the arena. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // void yr_arena_destroy( YR_ARENA* arena) { YR_RELOC* reloc; YR_RELOC* next_reloc; YR_ARENA_PAGE* page; YR_ARENA_PAGE* next_page; if (arena == NULL) return; page = arena->page_list_head; while(page != NULL) { next_page = page->next; reloc = page->reloc_list_head; while (reloc != NULL) { next_reloc = reloc->next; yr_free(reloc); reloc = next_reloc; } yr_free(page->address); yr_free(page); page = next_page; } yr_free(arena); } // // yr_arena_base_address // // Returns the base address for the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // // Returns: // A pointer to the arena's data. NULL if no data has been written to // the arena yet. // void* yr_arena_base_address( YR_ARENA* arena) { if (arena->page_list_head->used == 0) return NULL; return arena->page_list_head->address; } // // yr_arena_next_address // // Given an address and an offset, returns the address where // address + offset resides. The arena is a collection of non-contiguous // regions of memory (pages), if address is pointing at the end of a page, // address + offset could cross the page boundary and point at somewhere // within the next page, this function handles these situations. It works // also with negative offsets. 
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//    void* address    - Base address.
//    int offset       - Offset.
//
// Returns:
//    A pointer
//

void* yr_arena_next_address(
  YR_ARENA* arena,
  void* address,
  size_t offset)
{
  YR_ARENA_PAGE* page;

  page = _yr_arena_page_for_address(arena, address);

  assert(page != NULL);

  // Common case: address + offset stays within the same page.
  if ((uint8_t*) address + offset >= page->address &&
      (uint8_t*) address + offset < page->address + page->used)
  {
    return (uint8_t*) address + offset;
  }

  // NOTE(review): offset is declared size_t (unsigned), so a caller-passed
  // negative value reaches this code as a huge unsigned number and
  // offset > 0 is true for any non-zero value; the else branch below is
  // effectively only reachable when offset == 0 — confirm against callers.
  if (offset > 0)
  {
    // Skip the remainder of the current page, then advance page by page.
    offset -= page->address + page->used - (uint8_t*) address;
    page = page->next;

    while (page != NULL)
    {
      if (offset < page->used)
        return page->address + offset;

      offset -= page->used;
      page = page->next;
    }
  }
  else
  {
    // Walk backwards through the previous pages.
    offset += page->used;
    page = page->prev;

    while (page != NULL)
    {
      if (offset < page->used)
        return page->address + page->used + offset;

      offset += page->used;
      page = page->prev;
    }
  }

  // The resulting address falls outside every page of the arena.
  return NULL;
}


//
// yr_arena_coalesce
//
// Coalesce the arena into a single page. This is a required step before
// saving the arena to a file.
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
//

int yr_arena_coalesce(
    YR_ARENA* arena)
{
  YR_ARENA_PAGE* page;
  YR_ARENA_PAGE* big_page;
  YR_ARENA_PAGE* next_page;
  YR_RELOC* reloc;

  uint8_t** reloc_address;
  uint8_t* reloc_target;

  size_t total_size = 0;

  // First pass: total up the used bytes across all pages.
  page = arena->page_list_head;

  while(page != NULL)
  {
    total_size += page->used;
    page = page->next;
  }

  // Create a new page that will contain the entire arena.
  big_page = _yr_arena_new_page(total_size);

  if (big_page == NULL)
    return ERROR_INSUFFICIENT_MEMORY;

  // Copy data from current pages to the big page and adjust relocs.
  page = arena->page_list_head;

  while (page != NULL)
  {
    // Remember where each page's data landed inside the big page; the
    // pointer-fixup pass below relies on new_address.
    page->new_address = big_page->address + big_page->used;
    memcpy(page->new_address, page->address, page->used);

    // Reloc offsets were relative to their own page; rebase them onto the
    // big page by adding the page's placement offset.
    reloc = page->reloc_list_head;

    while(reloc != NULL)
    {
      reloc->offset += (uint32_t) big_page->used;
      reloc = reloc->next;
    }

    // Splice this page's reloc list onto the big page's list.
    if (big_page->reloc_list_head == NULL)
      big_page->reloc_list_head = page->reloc_list_head;

    if (big_page->reloc_list_tail != NULL)
      big_page->reloc_list_tail->next = page->reloc_list_head;

    if (page->reloc_list_tail != NULL)
      big_page->reloc_list_tail = page->reloc_list_tail;

    big_page->used += page->used;
    page = page->next;
  }

  // Relocate pointers: every relocatable slot still holds an address into
  // one of the old pages; rewrite it to the equivalent address in big_page.
  reloc = big_page->reloc_list_head;

  while (reloc != NULL)
  {
    reloc_address = (uint8_t**) (big_page->address + reloc->offset);
    reloc_target = *reloc_address;

    if (reloc_target != NULL)
    {
      page = _yr_arena_page_for_address(arena, reloc_target);
      assert(page != NULL);
      *reloc_address = page->new_address + (reloc_target - page->address);
    }

    reloc = reloc->next;
  }

  // Release current pages. Their reloc entries now belong to big_page's
  // list, so only the page structs and data buffers are freed here.
  page = arena->page_list_head;

  while(page != NULL)
  {
    next_page = page->next;
    yr_free(page->address);
    yr_free(page);
    page = next_page;
  }

  arena->page_list_head = big_page;
  arena->current_page = big_page;
  arena->flags |= ARENA_FLAGS_COALESCED;

  return ERROR_SUCCESS;
}


//
// yr_arena_reserve_memory
//
// Ensures that the arena have enough contiguous memory for future allocations.
// if the available space in the current page is lower than "size", a new page
// is allocated.
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//    size_t size      - Size of the region to be reserved.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
// int yr_arena_reserve_memory( YR_ARENA* arena, size_t size) { YR_ARENA_PAGE* new_page; size_t new_page_size; uint8_t* new_page_address; if (size > free_space(arena->current_page)) { if (arena->flags & ARENA_FLAGS_FIXED_SIZE) return ERROR_INSUFFICIENT_MEMORY; // Requested space is bigger than current page's empty space, // lets calculate the size for a new page. new_page_size = arena->current_page->size * 2; while (new_page_size < size) new_page_size *= 2; if (arena->current_page->used == 0) { // Current page is not used at all, it can be reallocated. new_page_address = (uint8_t*) yr_realloc( arena->current_page->address, new_page_size); if (new_page_address == NULL) return ERROR_INSUFFICIENT_MEMORY; arena->current_page->address = new_page_address; arena->current_page->size = new_page_size; } else { new_page = _yr_arena_new_page(new_page_size); if (new_page == NULL) return ERROR_INSUFFICIENT_MEMORY; new_page->prev = arena->current_page; arena->current_page->next = new_page; arena->current_page = new_page; arena->flags &= ~ARENA_FLAGS_COALESCED; } } return ERROR_SUCCESS; } // // yr_arena_allocate_memory // // Allocates memory within the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // size_t size - Size of the region to be allocated. // void** allocated_memory - Address of a pointer to newly allocated // region. // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_allocate_memory( YR_ARENA* arena, size_t size, void** allocated_memory) { FAIL_ON_ERROR(yr_arena_reserve_memory(arena, size)); *allocated_memory = arena->current_page->address + \ arena->current_page->used; arena->current_page->used += size; return ERROR_SUCCESS; } // // yr_arena_allocate_struct // // Allocates a structure within the arena. This function is similar to // yr_arena_allocate_memory but additionally receives a variable-length // list of offsets within the structure where pointers reside. 
This allows // the arena to keep track of pointers that must be adjusted when memory // is relocated. This is an example on how to invoke this function: // // yr_arena_allocate_struct( // arena, // sizeof(MY_STRUCTURE), // (void**) &my_structure_ptr, // offsetof(MY_STRUCTURE, field_1), // offsetof(MY_STRUCTURE, field_2), // .. // offsetof(MY_STRUCTURE, field_N), // EOL); // // Args: // YR_ARENA* arena - Pointer to the arena. // size_t size - Size of the region to be allocated. // void** allocated_memory - Address of a pointer to newly allocated // region. // ... - Variable number of offsets relative to the // beginning of the struct. Offsets are of type // size_t. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_allocate_struct( YR_ARENA* arena, size_t size, void** allocated_memory, ...) { int result; va_list offsets; va_start(offsets, allocated_memory); result = yr_arena_allocate_memory(arena, size, allocated_memory); if (result == ERROR_SUCCESS) result = _yr_arena_make_relocatable(arena, *allocated_memory, offsets); va_end(offsets); if (result == ERROR_SUCCESS) memset(*allocated_memory, 0, size); return result; } // // yr_arena_make_relocatable // // Tells the arena that certain addresses contains a relocatable pointer. // // Args: // YR_ARENA* arena - Pointer to the arena. // void* base - Address within the arena. // ... - Variable number of size_t arguments with offsets // relative to base. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_make_relocatable( YR_ARENA* arena, void* base, ...) { int result; va_list offsets; va_start(offsets, base); result = _yr_arena_make_relocatable(arena, base, offsets); va_end(offsets); return result; } // // yr_arena_write_data // // Writes data to the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // void* data - Pointer to data to be written. // size_t size - Size of data. 
// void** written_data - Address where a pointer to the written data will // be returned. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_write_data( YR_ARENA* arena, void* data, size_t size, void** written_data) { void* output; int result; if (size > free_space(arena->current_page)) { result = yr_arena_allocate_memory(arena, size, &output); if (result != ERROR_SUCCESS) return result; } else { output = arena->current_page->address + arena->current_page->used; arena->current_page->used += size; } memcpy(output, data, size); if (written_data != NULL) *written_data = output; return ERROR_SUCCESS; } // // yr_arena_write_string // // Writes string to the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // const char* string - Pointer to string to be written. // char** written_string - Address where a pointer to the written data will // be returned. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_write_string( YR_ARENA* arena, const char* string, char** written_string) { return yr_arena_write_data( arena, (void*) string, strlen(string) + 1, (void**) written_string); } // // yr_arena_append // // Appends source_arena to target_arena. This operation destroys source_arena, // after returning any pointer to source_arena is no longer valid. The data // from source_arena is guaranteed to be aligned to a 16 bytes boundary when // written to the source_arena // // Args: // YR_ARENA* target_arena - Pointer to target the arena. // YR_ARENA* source_arena - Pointer to source arena. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
//

int yr_arena_append(
    YR_ARENA* target_arena,
    YR_ARENA* source_arena)
{
  uint8_t padding_data[15];

  // Bytes needed to bring the target's write position to the next 16-byte
  // boundary; 16 means it is already aligned and no padding is written.
  size_t padding_size = 16 - target_arena->current_page->used % 16;

  if (padding_size < 16)
  {
    // 0xCC padding makes the filler easy to spot in a hex dump.
    memset(&padding_data, 0xCC, padding_size);

    FAIL_ON_ERROR(yr_arena_write_data(
        target_arena,
        padding_data,
        padding_size,
        NULL));
  }

  // Adopt the source arena's pages by splicing its page list after the
  // target's current page.
  target_arena->current_page->next = source_arena->page_list_head;
  source_arena->page_list_head->prev = target_arena->current_page;
  target_arena->current_page = source_arena->current_page;

  // Only the arena struct is freed; its pages now belong to target_arena.
  yr_free(source_arena);

  return ERROR_SUCCESS;
}


//
// yr_arena_duplicate
//
// Duplicates the arena, making an exact copy. This function requires the
// arena to be coalesced.
//
// Args:
//    YR_ARENA* arena       - Pointer to the arena.
//    YR_ARENA** duplicated - Address where a pointer to the new arena arena
//                            will be returned.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
//

int yr_arena_duplicate(
    YR_ARENA* arena,
    YR_ARENA** duplicated)
{
  YR_RELOC* reloc;
  YR_RELOC* new_reloc;
  YR_ARENA_PAGE* page;
  YR_ARENA_PAGE* new_page;
  YR_ARENA* new_arena;

  uint8_t** reloc_address;
  uint8_t* reloc_target;

  // Only coalesced arenas can be duplicated.
  assert(arena->flags & ARENA_FLAGS_COALESCED);

  page = arena->page_list_head;

  FAIL_ON_ERROR(yr_arena_create(page->size, arena->flags, &new_arena));

  new_page = new_arena->current_page;
  new_page->used = page->used;

  memcpy(new_page->address, page->address, page->size);

  reloc = page->reloc_list_head;

  // Clone the reloc list, rewriting each relocatable pointer so it targets
  // the copied buffer instead of the original one.
  while (reloc != NULL)
  {
    new_reloc = (YR_RELOC*) yr_malloc(sizeof(YR_RELOC));

    if (new_reloc == NULL)
    {
      // Destroying the partially built arena also frees the relocs cloned
      // so far.
      yr_arena_destroy(new_arena);
      return ERROR_INSUFFICIENT_MEMORY;
    }

    new_reloc->offset = reloc->offset;
    new_reloc->next = NULL;

    if (new_page->reloc_list_head == NULL)
      new_page->reloc_list_head = new_reloc;

    if (new_page->reloc_list_tail != NULL)
      new_page->reloc_list_tail->next = new_reloc;

    new_page->reloc_list_tail = new_reloc;

    reloc_address = (uint8_t**) (new_page->address + new_reloc->offset);
    reloc_target = *reloc_address;

    if (reloc_target != NULL)
    {
      assert(reloc_target >= page->address);
      assert(reloc_target < page->address + page->used);

      // Same offset within the buffer, but relative to the new page.
      *reloc_address = reloc_target - page->address + new_page->address;
    }

    reloc = reloc->next;
  }

  *duplicated = new_arena;

  return ERROR_SUCCESS;
}


//
// yr_arena_load_stream
//
// Loads an arena from a stream.
//
// Args:
//    YR_STREAM* stream  - Pointer to stream object
//    YR_ARENA**         - Address where a pointer to the loaded arena
//                         will be returned
//
// Returns:
//    ERROR_SUCCESS if successful, appropriate error code otherwise.
// int yr_arena_load_stream( YR_STREAM* stream, YR_ARENA** arena) { YR_ARENA_PAGE* page; YR_ARENA* new_arena; ARENA_FILE_HEADER header; uint32_t reloc_offset; uint8_t** reloc_address; uint8_t* reloc_target; int result; if (yr_stream_read(&header, sizeof(header), 1, stream) != 1) return ERROR_INVALID_FILE; if (header.magic[0] != 'Y' || header.magic[1] != 'A' || header.magic[2] != 'R' || header.magic[3] != 'A') { return ERROR_INVALID_FILE; } if (header.size < 2048) // compiled rules are always larger than 2KB return ERROR_CORRUPT_FILE; if (header.version != ARENA_FILE_VERSION) return ERROR_UNSUPPORTED_FILE_VERSION; result = yr_arena_create(header.size, 0, &new_arena); if (result != ERROR_SUCCESS) return result; page = new_arena->current_page; if (yr_stream_read(page->address, header.size, 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } page->used = header.size; if (yr_stream_read(&reloc_offset, sizeof(reloc_offset), 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } while (reloc_offset != 0xFFFFFFFF) { if (reloc_offset > header.size - sizeof(uint8_t*)) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } yr_arena_make_relocatable(new_arena, page->address, reloc_offset, EOL); reloc_address = (uint8_t**) (page->address + reloc_offset); reloc_target = *reloc_address; if (reloc_target != (uint8_t*) (size_t) 0xFFFABADA) *reloc_address += (size_t) page->address; else *reloc_address = 0; if (yr_stream_read(&reloc_offset, sizeof(reloc_offset), 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } } *arena = new_arena; return ERROR_SUCCESS; } // // yr_arena_save_stream // // Saves the arena into a stream. If the file exists its overwritten. This // function requires the arena to be coalesced. // // Args: // YR_ARENA* arena - Pointer to the arena. // YR_STREAM* stream - Pointer to stream object. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
// int yr_arena_save_stream( YR_ARENA* arena, YR_STREAM* stream) { YR_ARENA_PAGE* page; YR_RELOC* reloc; ARENA_FILE_HEADER header; uint32_t end_marker = 0xFFFFFFFF; uint8_t** reloc_address; uint8_t* reloc_target; // Only coalesced arenas can be saved. assert(arena->flags & ARENA_FLAGS_COALESCED); page = arena->page_list_head; reloc = page->reloc_list_head; // Convert pointers to offsets before saving. while (reloc != NULL) { reloc_address = (uint8_t**) (page->address + reloc->offset); reloc_target = *reloc_address; if (reloc_target != NULL) { assert(reloc_target >= page->address); assert(reloc_target < page->address + page->used); *reloc_address = (uint8_t*) (*reloc_address - page->address); } else { *reloc_address = (uint8_t*) (size_t) 0xFFFABADA; } reloc = reloc->next; } assert(page->size < 0x80000000); // 2GB header.magic[0] = 'Y'; header.magic[1] = 'A'; header.magic[2] = 'R'; header.magic[3] = 'A'; header.size = (int32_t) page->size; header.version = ARENA_FILE_VERSION; yr_stream_write(&header, sizeof(header), 1, stream); yr_stream_write(page->address, header.size, 1, stream); reloc = page->reloc_list_head; // Convert offsets back to pointers. while (reloc != NULL) { yr_stream_write(&reloc->offset, sizeof(reloc->offset), 1, stream); reloc_address = (uint8_t**) (page->address + reloc->offset); reloc_target = *reloc_address; if (reloc_target != (void*) (size_t) 0xFFFABADA) *reloc_address += (size_t) page->address; else *reloc_address = 0; reloc = reloc->next; } yr_stream_write(&end_marker, sizeof(end_marker), 1, stream); return ERROR_SUCCESS; }
./CrossVul/dataset_final_sorted/CWE-416/c/good_3348_0
crossvul-cpp_data_bad_84_0
/* ** vm.c - virtual machine for mruby ** ** See Copyright Notice in mruby.h */ #include <stddef.h> #include <stdarg.h> #include <math.h> #include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/hash.h> #include <mruby/irep.h> #include <mruby/numeric.h> #include <mruby/proc.h> #include <mruby/range.h> #include <mruby/string.h> #include <mruby/variable.h> #include <mruby/error.h> #include <mruby/opcode.h> #include "value_array.h" #include <mruby/throw.h> #ifdef MRB_DISABLE_STDIO #if defined(__cplusplus) extern "C" { #endif void abort(void); #if defined(__cplusplus) } /* extern "C" { */ #endif #endif #define STACK_INIT_SIZE 128 #define CALLINFO_INIT_SIZE 32 #ifndef ENSURE_STACK_INIT_SIZE #define ENSURE_STACK_INIT_SIZE 16 #endif #ifndef RESCUE_STACK_INIT_SIZE #define RESCUE_STACK_INIT_SIZE 16 #endif /* Define amount of linear stack growth. */ #ifndef MRB_STACK_GROWTH #define MRB_STACK_GROWTH 128 #endif /* Maximum mrb_funcall() depth. Should be set lower on memory constrained systems. */ #ifndef MRB_FUNCALL_DEPTH_MAX #define MRB_FUNCALL_DEPTH_MAX 512 #endif /* Maximum depth of ecall() recursion. */ #ifndef MRB_ECALL_DEPTH_MAX #define MRB_ECALL_DEPTH_MAX 32 #endif /* Maximum stack depth. Should be set lower on memory constrained systems. The value below allows about 60000 recursive calls in the simplest case. 
*/ #ifndef MRB_STACK_MAX #define MRB_STACK_MAX (0x40000 - MRB_STACK_GROWTH) #endif #ifdef VM_DEBUG # define DEBUG(x) (x) #else # define DEBUG(x) #endif #ifndef MRB_GC_FIXED_ARENA static void mrb_gc_arena_shrink(mrb_state *mrb, int idx) { mrb_gc *gc = &mrb->gc; int capa = gc->arena_capa; if (idx < capa / 4) { capa >>= 2; if (capa < MRB_GC_ARENA_SIZE) { capa = MRB_GC_ARENA_SIZE; } if (capa != gc->arena_capa) { gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa); gc->arena_capa = capa; } } } #else #define mrb_gc_arena_shrink(mrb,idx) #endif #define CALL_MAXARGS 127 void mrb_method_missing(mrb_state *mrb, mrb_sym name, mrb_value self, mrb_value args); static inline void stack_clear(mrb_value *from, size_t count) { #ifndef MRB_NAN_BOXING const mrb_value mrb_value_zero = { { 0 } }; while (count-- > 0) { *from++ = mrb_value_zero; } #else while (count-- > 0) { SET_NIL_VALUE(*from); from++; } #endif } static inline void stack_copy(mrb_value *dst, const mrb_value *src, size_t size) { while (size-- > 0) { *dst++ = *src++; } } static void stack_init(mrb_state *mrb) { struct mrb_context *c = mrb->c; /* mrb_assert(mrb->stack == NULL); */ c->stbase = (mrb_value *)mrb_calloc(mrb, STACK_INIT_SIZE, sizeof(mrb_value)); c->stend = c->stbase + STACK_INIT_SIZE; c->stack = c->stbase; /* mrb_assert(ci == NULL); */ c->cibase = (mrb_callinfo *)mrb_calloc(mrb, CALLINFO_INIT_SIZE, sizeof(mrb_callinfo)); c->ciend = c->cibase + CALLINFO_INIT_SIZE; c->ci = c->cibase; c->ci->target_class = mrb->object_class; c->ci->stackent = c->stack; } static inline void envadjust(mrb_state *mrb, mrb_value *oldbase, mrb_value *newbase, size_t size) { mrb_callinfo *ci = mrb->c->cibase; if (newbase == oldbase) return; while (ci <= mrb->c->ci) { struct REnv *e = ci->env; mrb_value *st; if (e && MRB_ENV_STACK_SHARED_P(e) && (st = e->stack) && oldbase <= st && st < oldbase+size) { ptrdiff_t off = e->stack - oldbase; e->stack = newbase + off; } if (ci->proc && 
MRB_PROC_ENV_P(ci->proc) && ci->env != MRB_PROC_ENV(ci->proc)) { e = MRB_PROC_ENV(ci->proc); if (e && MRB_ENV_STACK_SHARED_P(e) && (st = e->stack) && oldbase <= st && st < oldbase+size) { ptrdiff_t off = e->stack - oldbase; e->stack = newbase + off; } } ci->stackent = newbase + (ci->stackent - oldbase); ci++; } } /** def rec ; $deep =+ 1 ; if $deep > 1000 ; return 0 ; end ; rec ; end */ static void stack_extend_alloc(mrb_state *mrb, int room) { mrb_value *oldbase = mrb->c->stbase; mrb_value *newstack; size_t oldsize = mrb->c->stend - mrb->c->stbase; size_t size = oldsize; size_t off = mrb->c->stack - mrb->c->stbase; if (off > size) size = off; #ifdef MRB_STACK_EXTEND_DOUBLING if (room <= size) size *= 2; else size += room; #else /* Use linear stack growth. It is slightly slower than doubling the stack space, but it saves memory on small devices. */ if (room <= MRB_STACK_GROWTH) size += MRB_STACK_GROWTH; else size += room; #endif newstack = (mrb_value *)mrb_realloc(mrb, mrb->c->stbase, sizeof(mrb_value) * size); if (newstack == NULL) { mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err)); } stack_clear(&(newstack[oldsize]), size - oldsize); envadjust(mrb, oldbase, newstack, size); mrb->c->stbase = newstack; mrb->c->stack = mrb->c->stbase + off; mrb->c->stend = mrb->c->stbase + size; /* Raise an exception if the new stack size will be too large, to prevent infinite recursion. However, do this only after resizing the stack, so mrb_raise has stack space to work with. 
*/ if (size > MRB_STACK_MAX) { mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err)); } } static inline void stack_extend(mrb_state *mrb, int room) { if (mrb->c->stack + room >= mrb->c->stend) { stack_extend_alloc(mrb, room); } } static inline struct REnv* uvenv(mrb_state *mrb, int up) { struct RProc *proc = mrb->c->ci->proc; struct REnv *e; while (up--) { proc = proc->upper; if (!proc) return NULL; } e = MRB_PROC_ENV(proc); if (e) return e; /* proc has enclosed env */ else { mrb_callinfo *ci = mrb->c->ci; mrb_callinfo *cb = mrb->c->cibase; while (cb <= ci) { if (ci->proc == proc) { return ci->env; } ci--; } } return NULL; } static inline struct RProc* top_proc(mrb_state *mrb, struct RProc *proc) { while (proc->upper) { if (MRB_PROC_SCOPE_P(proc) || MRB_PROC_STRICT_P(proc)) return proc; proc = proc->upper; } return proc; } #define CI_ACC_SKIP -1 #define CI_ACC_DIRECT -2 #define CI_ACC_RESUMED -3 static inline mrb_callinfo* cipush(mrb_state *mrb) { struct mrb_context *c = mrb->c; static const mrb_callinfo ci_zero = { 0 }; mrb_callinfo *ci = c->ci; int ridx = ci->ridx; if (ci + 1 == c->ciend) { ptrdiff_t size = ci - c->cibase; c->cibase = (mrb_callinfo *)mrb_realloc(mrb, c->cibase, sizeof(mrb_callinfo)*size*2); c->ci = c->cibase + size; c->ciend = c->cibase + size * 2; } ci = ++c->ci; *ci = ci_zero; ci->epos = mrb->c->eidx; ci->ridx = ridx; return ci; } void mrb_env_unshare(mrb_state *mrb, struct REnv *e) { if (e == NULL) return; else { size_t len = (size_t)MRB_ENV_STACK_LEN(e); mrb_value *p; if (!MRB_ENV_STACK_SHARED_P(e)) return; if (e->cxt != mrb->c) return; p = (mrb_value *)mrb_malloc(mrb, sizeof(mrb_value)*len); if (len > 0) { stack_copy(p, e->stack, len); } e->stack = p; MRB_ENV_UNSHARE_STACK(e); mrb_write_barrier(mrb, (struct RBasic *)e); } } static inline void cipop(mrb_state *mrb) { struct mrb_context *c = mrb->c; struct REnv *env = c->ci->env; c->ci--; if (env) mrb_env_unshare(mrb, env); } void mrb_exc_set(mrb_state *mrb, mrb_value exc); static void 
ecall(mrb_state *mrb) { struct RProc *p; struct mrb_context *c = mrb->c; mrb_callinfo *ci = c->ci; struct RObject *exc; struct REnv *env; ptrdiff_t cioff; int ai = mrb_gc_arena_save(mrb); int i = --c->eidx; int nregs; if (i<0) return; if (ci - c->cibase > MRB_ECALL_DEPTH_MAX) { mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err)); } p = c->ensure[i]; if (!p) return; mrb_assert(!MRB_PROC_CFUNC_P(p)); c->ensure[i] = NULL; nregs = p->upper->body.irep->nregs; if (ci->proc && !MRB_PROC_CFUNC_P(ci->proc) && ci->proc->body.irep->nregs > nregs) { nregs = ci->proc->body.irep->nregs; } cioff = ci - c->cibase; ci = cipush(mrb); ci->stackent = mrb->c->stack; ci->mid = ci[-1].mid; ci->acc = CI_ACC_SKIP; ci->argc = 0; ci->proc = p; ci->nregs = p->body.irep->nregs; ci->target_class = MRB_PROC_TARGET_CLASS(p); env = MRB_PROC_ENV(p); mrb_assert(env); c->stack += nregs; exc = mrb->exc; mrb->exc = 0; if (exc) { mrb_gc_protect(mrb, mrb_obj_value(exc)); } mrb_run(mrb, p, env->stack[0]); mrb->c = c; c->ci = c->cibase + cioff; if (!mrb->exc) mrb->exc = exc; mrb_gc_arena_restore(mrb, ai); } #ifndef MRB_FUNCALL_ARGC_MAX #define MRB_FUNCALL_ARGC_MAX 16 #endif MRB_API mrb_value mrb_funcall(mrb_state *mrb, mrb_value self, const char *name, mrb_int argc, ...) { mrb_value argv[MRB_FUNCALL_ARGC_MAX]; va_list ap; mrb_int i; mrb_sym mid = mrb_intern_cstr(mrb, name); if (argc > MRB_FUNCALL_ARGC_MAX) { mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. 
(limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")"); } va_start(ap, argc); for (i = 0; i < argc; i++) { argv[i] = va_arg(ap, mrb_value); } va_end(ap); return mrb_funcall_argv(mrb, self, mid, argc, argv); } MRB_API mrb_value mrb_funcall_with_block(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv, mrb_value blk) { mrb_value val; if (!mrb->jmp) { struct mrb_jmpbuf c_jmp; ptrdiff_t nth_ci = mrb->c->ci - mrb->c->cibase; MRB_TRY(&c_jmp) { mrb->jmp = &c_jmp; /* recursive call */ val = mrb_funcall_with_block(mrb, self, mid, argc, argv, blk); mrb->jmp = 0; } MRB_CATCH(&c_jmp) { /* error */ while (nth_ci < (mrb->c->ci - mrb->c->cibase)) { mrb->c->stack = mrb->c->ci->stackent; cipop(mrb); } mrb->jmp = 0; val = mrb_obj_value(mrb->exc); } MRB_END_EXC(&c_jmp); mrb->jmp = 0; } else { mrb_method_t m; struct RClass *c; mrb_callinfo *ci; int n; ptrdiff_t voff = -1; if (!mrb->c->stack) { stack_init(mrb); } n = mrb->c->ci->nregs; if (argc < 0) { mrb_raisef(mrb, E_ARGUMENT_ERROR, "negative argc for funcall (%S)", mrb_fixnum_value(argc)); } c = mrb_class(mrb, self); m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); mrb_value args = mrb_ary_new_from_values(mrb, argc, argv); m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m)) { mrb_method_missing(mrb, mid, self, args); } mrb_ary_unshift(mrb, args, mrb_symbol_value(mid)); stack_extend(mrb, n+2); mrb->c->stack[n+1] = args; argc = -1; } if (mrb->c->ci - mrb->c->cibase > MRB_FUNCALL_DEPTH_MAX) { mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err)); } ci = cipush(mrb); ci->mid = mid; ci->stackent = mrb->c->stack; ci->argc = (int)argc; ci->target_class = c; mrb->c->stack = mrb->c->stack + n; if (mrb->c->stbase <= argv && argv < mrb->c->stend) { voff = argv - mrb->c->stbase; } if (MRB_METHOD_CFUNC_P(m)) { ci->nregs = (int)(argc + 2); stack_extend(mrb, ci->nregs); } else if (argc >= CALL_MAXARGS) { mrb_value args = 
mrb_ary_new_from_values(mrb, argc, argv); stack_extend(mrb, ci->nregs+2); mrb->c->stack[1] = args; ci->argc = -1; argc = 1; } else { struct RProc *p = MRB_METHOD_PROC(m); ci->proc = p; if (argc < 0) argc = 1; ci->nregs = (int)(p->body.irep->nregs + argc); stack_extend(mrb, ci->nregs); } if (voff >= 0) { argv = mrb->c->stbase + voff; } mrb->c->stack[0] = self; if (ci->argc > 0) { stack_copy(mrb->c->stack+1, argv, argc); } mrb->c->stack[argc+1] = blk; if (MRB_METHOD_CFUNC_P(m)) { int ai = mrb_gc_arena_save(mrb); ci->acc = CI_ACC_DIRECT; if (MRB_METHOD_PROC_P(m)) { ci->proc = MRB_METHOD_PROC(m); } val = MRB_METHOD_CFUNC(m)(mrb, self); mrb->c->stack = mrb->c->ci->stackent; cipop(mrb); mrb_gc_arena_restore(mrb, ai); } else { ci->acc = CI_ACC_SKIP; val = mrb_run(mrb, MRB_METHOD_PROC(m), self); } } mrb_gc_protect(mrb, val); return val; } MRB_API mrb_value mrb_funcall_argv(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv) { return mrb_funcall_with_block(mrb, self, mid, argc, argv, mrb_nil_value()); } mrb_value mrb_exec_irep(mrb_state *mrb, mrb_value self, struct RProc *p) { mrb_callinfo *ci = mrb->c->ci; int keep; mrb->c->stack[0] = self; ci->proc = p; if (MRB_PROC_CFUNC_P(p)) { return MRB_PROC_CFUNC(p)(mrb, self); } ci->nregs = p->body.irep->nregs; if (ci->argc < 0) keep = 3; else keep = ci->argc + 2; if (ci->nregs < keep) { stack_extend(mrb, keep); } else { stack_extend(mrb, ci->nregs); stack_clear(mrb->c->stack+keep, ci->nregs-keep); } ci = cipush(mrb); ci->nregs = 0; ci->target_class = 0; ci->pc = p->body.irep->iseq; ci->stackent = mrb->c->stack; ci->acc = 0; return self; } /* 15.3.1.3.4 */ /* 15.3.1.3.44 */ /* * call-seq: * obj.send(symbol [, args...]) -> obj * obj.__send__(symbol [, args...]) -> obj * * Invokes the method identified by _symbol_, passing it any * arguments specified. You can use <code>__send__</code> if the name * +send+ clashes with an existing method in _obj_. 
* * class Klass * def hello(*args) * "Hello " + args.join(' ') * end * end * k = Klass.new * k.send :hello, "gentle", "readers" #=> "Hello gentle readers" */ MRB_API mrb_value mrb_f_send(mrb_state *mrb, mrb_value self) { mrb_sym name; mrb_value block, *argv, *regs; mrb_int argc, i, len; mrb_method_t m; struct RClass *c; mrb_callinfo *ci; mrb_get_args(mrb, "n*&", &name, &argv, &argc, &block); ci = mrb->c->ci; if (ci->acc < 0) { funcall: return mrb_funcall_with_block(mrb, self, name, argc, argv, block); } c = mrb_class(mrb, self); m = mrb_method_search_vm(mrb, &c, name); if (MRB_METHOD_UNDEF_P(m)) { /* call method_mising */ goto funcall; } ci->mid = name; ci->target_class = c; regs = mrb->c->stack+1; /* remove first symbol from arguments */ if (ci->argc >= 0) { for (i=0,len=ci->argc; i<len; i++) { regs[i] = regs[i+1]; } ci->argc--; } else { /* variable length arguments */ mrb_ary_shift(mrb, regs[0]); } if (MRB_METHOD_CFUNC_P(m)) { if (MRB_METHOD_PROC_P(m)) { ci->proc = MRB_METHOD_PROC(m); } return MRB_METHOD_CFUNC(m)(mrb, self); } return mrb_exec_irep(mrb, self, MRB_METHOD_PROC(m)); } static mrb_value eval_under(mrb_state *mrb, mrb_value self, mrb_value blk, struct RClass *c) { struct RProc *p; mrb_callinfo *ci; if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given"); } ci = mrb->c->ci; if (ci->acc == CI_ACC_DIRECT) { ci->target_class = c; return mrb_yield_cont(mrb, blk, self, 1, &self); } ci->target_class = c; p = mrb_proc_ptr(blk); ci->proc = p; ci->argc = 1; ci->mid = ci[-1].mid; if (MRB_PROC_CFUNC_P(p)) { stack_extend(mrb, 3); mrb->c->stack[0] = self; mrb->c->stack[1] = self; mrb->c->stack[2] = mrb_nil_value(); return MRB_PROC_CFUNC(p)(mrb, self); } ci->nregs = p->body.irep->nregs; stack_extend(mrb, (ci->nregs < 3) ? 
3 : ci->nregs); mrb->c->stack[0] = self; mrb->c->stack[1] = self; mrb->c->stack[2] = mrb_nil_value(); ci = cipush(mrb); ci->nregs = 0; ci->target_class = 0; ci->pc = p->body.irep->iseq; ci->stackent = mrb->c->stack; ci->acc = 0; return self; } /* 15.2.2.4.35 */ /* * call-seq: * mod.class_eval {| | block } -> obj * mod.module_eval {| | block } -> obj * * Evaluates block in the context of _mod_. This can * be used to add methods to a class. <code>module_eval</code> returns * the result of evaluating its argument. */ mrb_value mrb_mod_module_eval(mrb_state *mrb, mrb_value mod) { mrb_value a, b; if (mrb_get_args(mrb, "|S&", &a, &b) == 1) { mrb_raise(mrb, E_NOTIMP_ERROR, "module_eval/class_eval with string not implemented"); } return eval_under(mrb, mod, b, mrb_class_ptr(mod)); } /* 15.3.1.3.18 */ /* * call-seq: * obj.instance_eval {| | block } -> obj * * Evaluates the given block,within the context of the receiver (_obj_). * In order to set the context, the variable +self+ is set to _obj_ while * the code is executing, giving the code access to _obj_'s * instance variables. In the version of <code>instance_eval</code> * that takes a +String+, the optional second and third * parameters supply a filename and starting line number that are used * when reporting compilation errors. 
* * class KlassWithSecret * def initialize * @secret = 99 * end * end * k = KlassWithSecret.new * k.instance_eval { @secret } #=> 99 */ mrb_value mrb_obj_instance_eval(mrb_state *mrb, mrb_value self) { mrb_value a, b; mrb_value cv; struct RClass *c; if (mrb_get_args(mrb, "|S&", &a, &b) == 1) { mrb_raise(mrb, E_NOTIMP_ERROR, "instance_eval with string not implemented"); } switch (mrb_type(self)) { case MRB_TT_SYMBOL: case MRB_TT_FIXNUM: #ifndef MRB_WITHOUT_FLOAT case MRB_TT_FLOAT: #endif c = 0; break; default: cv = mrb_singleton_class(mrb, self); c = mrb_class_ptr(cv); break; } return eval_under(mrb, self, b, c); } MRB_API mrb_value mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c) { struct RProc *p; mrb_sym mid = mrb->c->ci->mid; mrb_callinfo *ci; int n = mrb->c->ci->nregs; mrb_value val; if (mrb_nil_p(b)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given"); } if (mrb->c->ci - mrb->c->cibase > MRB_FUNCALL_DEPTH_MAX) { mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err)); } p = mrb_proc_ptr(b); ci = cipush(mrb); ci->mid = mid; ci->proc = p; ci->stackent = mrb->c->stack; ci->argc = (int)argc; ci->target_class = c; ci->acc = CI_ACC_SKIP; mrb->c->stack = mrb->c->stack + n; ci->nregs = MRB_PROC_CFUNC_P(p) ? 
(int)(argc+2) : p->body.irep->nregs; stack_extend(mrb, ci->nregs); mrb->c->stack[0] = self; if (argc > 0) { stack_copy(mrb->c->stack+1, argv, argc); } mrb->c->stack[argc+1] = mrb_nil_value(); if (MRB_PROC_CFUNC_P(p)) { val = MRB_PROC_CFUNC(p)(mrb, self); mrb->c->stack = mrb->c->ci->stackent; cipop(mrb); } else { val = mrb_run(mrb, p, self); } return val; } MRB_API mrb_value mrb_yield_argv(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv) { struct RProc *p = mrb_proc_ptr(b); return mrb_yield_with_class(mrb, b, argc, argv, MRB_PROC_ENV(p)->stack[0], MRB_PROC_TARGET_CLASS(p)); } MRB_API mrb_value mrb_yield(mrb_state *mrb, mrb_value b, mrb_value arg) { struct RProc *p = mrb_proc_ptr(b); return mrb_yield_with_class(mrb, b, 1, &arg, MRB_PROC_ENV(p)->stack[0], MRB_PROC_TARGET_CLASS(p)); } mrb_value mrb_yield_cont(mrb_state *mrb, mrb_value b, mrb_value self, mrb_int argc, const mrb_value *argv) { struct RProc *p; mrb_callinfo *ci; if (mrb_nil_p(b)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given"); } if (mrb_type(b) != MRB_TT_PROC) { mrb_raise(mrb, E_TYPE_ERROR, "not a block"); } p = mrb_proc_ptr(b); ci = mrb->c->ci; stack_extend(mrb, 3); mrb->c->stack[1] = mrb_ary_new_from_values(mrb, argc, argv); mrb->c->stack[2] = mrb_nil_value(); ci->argc = -1; return mrb_exec_irep(mrb, self, p); } mrb_value mrb_mod_s_nesting(mrb_state *mrb, mrb_value mod) { struct RProc *proc; mrb_value ary; struct RClass *c = NULL; mrb_get_args(mrb, ""); ary = mrb_ary_new(mrb); proc = mrb->c->ci[-1].proc; /* callee proc */ mrb_assert(!MRB_PROC_CFUNC_P(proc)); while (proc) { if (MRB_PROC_SCOPE_P(proc)) { struct RClass *c2 = MRB_PROC_TARGET_CLASS(proc); if (c2 != c) { c = c2; mrb_ary_push(mrb, ary, mrb_obj_value(c)); } } proc = proc->upper; } return ary; } static struct RBreak* break_new(mrb_state *mrb, struct RProc *p, mrb_value val) { struct RBreak *brk; brk = (struct RBreak*)mrb_obj_alloc(mrb, MRB_TT_BREAK, NULL); brk->proc = p; brk->val = val; return brk; } typedef enum { 
LOCALJUMP_ERROR_RETURN = 0, LOCALJUMP_ERROR_BREAK = 1, LOCALJUMP_ERROR_YIELD = 2 } localjump_error_kind; static void localjump_error(mrb_state *mrb, localjump_error_kind kind) { char kind_str[3][7] = { "return", "break", "yield" }; char kind_str_len[] = { 6, 5, 5 }; static const char lead[] = "unexpected "; mrb_value msg; mrb_value exc; msg = mrb_str_new_capa(mrb, sizeof(lead) + 7); mrb_str_cat(mrb, msg, lead, sizeof(lead) - 1); mrb_str_cat(mrb, msg, kind_str[kind], kind_str_len[kind]); exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg); mrb_exc_set(mrb, exc); } static void argnum_error(mrb_state *mrb, mrb_int num) { mrb_value exc; mrb_value str; mrb_int argc = mrb->c->ci->argc; if (argc < 0) { mrb_value args = mrb->c->stack[1]; if (mrb_array_p(args)) { argc = RARRAY_LEN(args); } } if (mrb->c->ci->mid) { str = mrb_format(mrb, "'%S': wrong number of arguments (%S for %S)", mrb_sym2str(mrb, mrb->c->ci->mid), mrb_fixnum_value(argc), mrb_fixnum_value(num)); } else { str = mrb_format(mrb, "wrong number of arguments (%S for %S)", mrb_fixnum_value(argc), mrb_fixnum_value(num)); } exc = mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str); mrb_exc_set(mrb, exc); } #define ERR_PC_SET(mrb, pc) mrb->c->ci->err = pc; #define ERR_PC_CLR(mrb) mrb->c->ci->err = 0; #ifdef MRB_ENABLE_DEBUG_HOOK #define CODE_FETCH_HOOK(mrb, irep, pc, regs) if ((mrb)->code_fetch_hook) (mrb)->code_fetch_hook((mrb), (irep), (pc), (regs)); #else #define CODE_FETCH_HOOK(mrb, irep, pc, regs) #endif #ifdef MRB_BYTECODE_DECODE_OPTION #define BYTECODE_DECODER(x) ((mrb)->bytecode_decoder)?(mrb)->bytecode_decoder((mrb), (x)):(x) #else #define BYTECODE_DECODER(x) (x) #endif #if defined __GNUC__ || defined __clang__ || defined __INTEL_COMPILER #define DIRECT_THREADED #endif #ifndef DIRECT_THREADED #define INIT_DISPATCH for (;;) { i = BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); switch (GET_OPCODE(i)) { #define CASE(op) case op: #define NEXT pc++; break #define JUMP break #define END_DISPATCH }} #else 
#define INIT_DISPATCH JUMP; return mrb_nil_value(); #define CASE(op) L_ ## op: #define NEXT i=BYTECODE_DECODER(*++pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[GET_OPCODE(i)] #define JUMP i=BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[GET_OPCODE(i)] #define END_DISPATCH #endif MRB_API mrb_value mrb_vm_run(mrb_state *mrb, struct RProc *proc, mrb_value self, unsigned int stack_keep) { mrb_irep *irep = proc->body.irep; mrb_value result; struct mrb_context *c = mrb->c; ptrdiff_t cioff = c->ci - c->cibase; unsigned int nregs = irep->nregs; if (!c->stack) { stack_init(mrb); } if (stack_keep > nregs) nregs = stack_keep; stack_extend(mrb, nregs); stack_clear(c->stack + stack_keep, nregs - stack_keep); c->stack[0] = self; result = mrb_vm_exec(mrb, proc, irep->iseq); if (c->ci - c->cibase > cioff) { c->ci = c->cibase + cioff; } if (mrb->c != c) { if (mrb->c->fib) { mrb_write_barrier(mrb, (struct RBasic*)mrb->c->fib); } mrb->c = c; } return result; } MRB_API mrb_value mrb_vm_exec(mrb_state *mrb, struct RProc *proc, mrb_code *pc) { /* mrb_assert(mrb_proc_cfunc_p(proc)) */ mrb_irep *irep = proc->body.irep; mrb_value *pool = irep->pool; mrb_sym *syms = irep->syms; mrb_code i; int ai = mrb_gc_arena_save(mrb); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf c_jmp; #ifdef DIRECT_THREADED static void *optable[] = { &&L_OP_NOP, &&L_OP_MOVE, &&L_OP_LOADL, &&L_OP_LOADI, &&L_OP_LOADSYM, &&L_OP_LOADNIL, &&L_OP_LOADSELF, &&L_OP_LOADT, &&L_OP_LOADF, &&L_OP_GETGLOBAL, &&L_OP_SETGLOBAL, &&L_OP_GETSPECIAL, &&L_OP_SETSPECIAL, &&L_OP_GETIV, &&L_OP_SETIV, &&L_OP_GETCV, &&L_OP_SETCV, &&L_OP_GETCONST, &&L_OP_SETCONST, &&L_OP_GETMCNST, &&L_OP_SETMCNST, &&L_OP_GETUPVAR, &&L_OP_SETUPVAR, &&L_OP_JMP, &&L_OP_JMPIF, &&L_OP_JMPNOT, &&L_OP_ONERR, &&L_OP_RESCUE, &&L_OP_POPERR, &&L_OP_RAISE, &&L_OP_EPUSH, &&L_OP_EPOP, &&L_OP_SEND, &&L_OP_SENDB, &&L_OP_FSEND, &&L_OP_CALL, &&L_OP_SUPER, &&L_OP_ARGARY, &&L_OP_ENTER, &&L_OP_KARG, &&L_OP_KDICT, &&L_OP_RETURN, 
&&L_OP_TAILCALL, &&L_OP_BLKPUSH, &&L_OP_ADD, &&L_OP_ADDI, &&L_OP_SUB, &&L_OP_SUBI, &&L_OP_MUL, &&L_OP_DIV, &&L_OP_EQ, &&L_OP_LT, &&L_OP_LE, &&L_OP_GT, &&L_OP_GE, &&L_OP_ARRAY, &&L_OP_ARYCAT, &&L_OP_ARYPUSH, &&L_OP_AREF, &&L_OP_ASET, &&L_OP_APOST, &&L_OP_STRING, &&L_OP_STRCAT, &&L_OP_HASH, &&L_OP_LAMBDA, &&L_OP_RANGE, &&L_OP_OCLASS, &&L_OP_CLASS, &&L_OP_MODULE, &&L_OP_EXEC, &&L_OP_METHOD, &&L_OP_SCLASS, &&L_OP_TCLASS, &&L_OP_DEBUG, &&L_OP_STOP, &&L_OP_ERR, }; #endif mrb_bool exc_catched = FALSE; RETRY_TRY_BLOCK: MRB_TRY(&c_jmp) { if (exc_catched) { exc_catched = FALSE; if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK) goto L_BREAK; goto L_RAISE; } mrb->jmp = &c_jmp; mrb->c->ci->proc = proc; mrb->c->ci->nregs = irep->nregs; #define regs (mrb->c->stack) INIT_DISPATCH { CASE(OP_NOP) { /* do nothing */ NEXT; } CASE(OP_MOVE) { /* A B R(A) := R(B) */ int a = GETARG_A(i); int b = GETARG_B(i); regs[a] = regs[b]; NEXT; } CASE(OP_LOADL) { /* A Bx R(A) := Pool(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); #ifdef MRB_WORD_BOXING mrb_value val = pool[bx]; #ifndef MRB_WITHOUT_FLOAT if (mrb_float_p(val)) { val = mrb_float_value(mrb, mrb_float(val)); } #endif regs[a] = val; #else regs[a] = pool[bx]; #endif NEXT; } CASE(OP_LOADI) { /* A sBx R(A) := sBx */ int a = GETARG_A(i); mrb_int bx = GETARG_sBx(i); SET_INT_VALUE(regs[a], bx); NEXT; } CASE(OP_LOADSYM) { /* A Bx R(A) := Syms(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); SET_SYM_VALUE(regs[a], syms[bx]); NEXT; } CASE(OP_LOADSELF) { /* A R(A) := self */ int a = GETARG_A(i); regs[a] = regs[0]; NEXT; } CASE(OP_LOADT) { /* A R(A) := true */ int a = GETARG_A(i); SET_TRUE_VALUE(regs[a]); NEXT; } CASE(OP_LOADF) { /* A R(A) := false */ int a = GETARG_A(i); SET_FALSE_VALUE(regs[a]); NEXT; } CASE(OP_GETGLOBAL) { /* A Bx R(A) := getglobal(Syms(Bx)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_gv_get(mrb, syms[bx]); regs[a] = val; NEXT; } CASE(OP_SETGLOBAL) { /* A Bx setglobal(Syms(Bx), R(A)) */ int a = 
GETARG_A(i); int bx = GETARG_Bx(i); mrb_gv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETSPECIAL) { /* A Bx R(A) := Special[Bx] */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_vm_special_get(mrb, bx); regs[a] = val; NEXT; } CASE(OP_SETSPECIAL) { /* A Bx Special[Bx] := R(A) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_special_set(mrb, bx, regs[a]); NEXT; } CASE(OP_GETIV) { /* A Bx R(A) := ivget(Bx) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val = mrb_vm_iv_get(mrb, syms[bx]); regs[a] = val; NEXT; } CASE(OP_SETIV) { /* A Bx ivset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_iv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETCV) { /* A Bx R(A) := cvget(Syms(Bx)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_value val; ERR_PC_SET(mrb, pc); val = mrb_vm_cv_get(mrb, syms[bx]); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETCV) { /* A Bx cvset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_cv_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETCONST) { /* A Bx R(A) := constget(Syms(Bx)) */ mrb_value val; int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_sym sym = syms[bx]; ERR_PC_SET(mrb, pc); val = mrb_vm_const_get(mrb, sym); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETCONST) { /* A Bx constset(Syms(Bx),R(A)) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_vm_const_set(mrb, syms[bx], regs[a]); NEXT; } CASE(OP_GETMCNST) { /* A Bx R(A) := R(A)::Syms(Bx) */ mrb_value val; int a = GETARG_A(i); int bx = GETARG_Bx(i); ERR_PC_SET(mrb, pc); val = mrb_const_get(mrb, regs[a], syms[bx]); ERR_PC_CLR(mrb); regs[a] = val; NEXT; } CASE(OP_SETMCNST) { /* A Bx R(A+1)::Syms(Bx) := R(A) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_const_set(mrb, regs[a+1], syms[bx], regs[a]); NEXT; } CASE(OP_GETUPVAR) { /* A B C R(A) := uvget(B,C) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value *regs_a = regs + a; struct REnv *e = uvenv(mrb, c); if (!e) { *regs_a = 
mrb_nil_value(); } else { *regs_a = e->stack[b]; } NEXT; } CASE(OP_SETUPVAR) { /* A B C uvset(B,C,R(A)) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); struct REnv *e = uvenv(mrb, c); if (e) { mrb_value *regs_a = regs + a; if (b < MRB_ENV_STACK_LEN(e)) { e->stack[b] = *regs_a; mrb_write_barrier(mrb, (struct RBasic*)e); } } NEXT; } CASE(OP_JMP) { /* sBx pc+=sBx */ int sbx = GETARG_sBx(i); pc += sbx; JUMP; } CASE(OP_JMPIF) { /* A sBx if R(A) pc+=sBx */ int a = GETARG_A(i); int sbx = GETARG_sBx(i); if (mrb_test(regs[a])) { pc += sbx; JUMP; } NEXT; } CASE(OP_JMPNOT) { /* A sBx if !R(A) pc+=sBx */ int a = GETARG_A(i); int sbx = GETARG_sBx(i); if (!mrb_test(regs[a])) { pc += sbx; JUMP; } NEXT; } CASE(OP_ONERR) { /* sBx pc+=sBx on exception */ int sbx = GETARG_sBx(i); if (mrb->c->rsize <= mrb->c->ci->ridx) { if (mrb->c->rsize == 0) mrb->c->rsize = RESCUE_STACK_INIT_SIZE; else mrb->c->rsize *= 2; mrb->c->rescue = (mrb_code **)mrb_realloc(mrb, mrb->c->rescue, sizeof(mrb_code*) * mrb->c->rsize); } mrb->c->rescue[mrb->c->ci->ridx++] = pc + sbx; NEXT; } CASE(OP_RESCUE) { /* A B R(A) := exc; clear(exc); R(B) := matched (bool) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value exc; if (c == 0) { exc = mrb_obj_value(mrb->exc); mrb->exc = 0; } else { /* continued; exc taken from R(A) */ exc = regs[a]; } if (b != 0) { mrb_value e = regs[b]; struct RClass *ec; switch (mrb_type(e)) { case MRB_TT_CLASS: case MRB_TT_MODULE: break; default: { mrb_value exc; exc = mrb_exc_new_str_lit(mrb, E_TYPE_ERROR, "class or module required for rescue clause"); mrb_exc_set(mrb, exc); goto L_RAISE; } } ec = mrb_class_ptr(e); regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec)); } if (a != 0 && c == 0) { regs[a] = exc; } NEXT; } CASE(OP_POPERR) { /* A A.times{rescue_pop()} */ int a = GETARG_A(i); mrb->c->ci->ridx -= a; NEXT; } CASE(OP_RAISE) { /* A raise(R(A)) */ int a = GETARG_A(i); mrb_exc_set(mrb, regs[a]); goto L_RAISE; } CASE(OP_EPUSH) { /* Bx 
ensure_push(SEQ[Bx]) */ int bx = GETARG_Bx(i); struct RProc *p; p = mrb_closure_new(mrb, irep->reps[bx]); /* push ensure_stack */ if (mrb->c->esize <= mrb->c->eidx+1) { if (mrb->c->esize == 0) mrb->c->esize = ENSURE_STACK_INIT_SIZE; else mrb->c->esize *= 2; mrb->c->ensure = (struct RProc **)mrb_realloc(mrb, mrb->c->ensure, sizeof(struct RProc*) * mrb->c->esize); } mrb->c->ensure[mrb->c->eidx++] = p; mrb->c->ensure[mrb->c->eidx] = NULL; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EPOP) { /* A A.times{ensure_pop().call} */ int a = GETARG_A(i); mrb_callinfo *ci = mrb->c->ci; int n, epos = ci->epos; mrb_value self = regs[0]; struct RClass *target_class = ci->target_class; if (mrb->c->eidx <= epos) { NEXT; } if (a > mrb->c->eidx - epos) a = mrb->c->eidx - epos; pc = pc + 1; for (n=0; n<a; n++) { proc = mrb->c->ensure[epos+n]; mrb->c->ensure[epos+n] = NULL; if (proc == NULL) continue; irep = proc->body.irep; ci = cipush(mrb); ci->mid = ci[-1].mid; ci->argc = 0; ci->proc = proc; ci->stackent = mrb->c->stack; ci->nregs = irep->nregs; ci->target_class = target_class; ci->pc = pc; ci->acc = ci[-1].nregs; mrb->c->stack += ci->acc; stack_extend(mrb, ci->nregs); regs[0] = self; pc = irep->iseq; } pool = irep->pool; syms = irep->syms; mrb->c->eidx = epos; JUMP; } CASE(OP_LOADNIL) { /* A R(A) := nil */ int a = GETARG_A(i); SET_NIL_VALUE(regs[a]); NEXT; } CASE(OP_SENDB) { /* A B C R(A) := call(R(A),Syms(B),R(A+1),...,R(A+C),&R(A+C+1))*/ /* fall through */ }; L_SEND: CASE(OP_SEND) { /* A B C R(A) := call(R(A),Syms(B),R(A+1),...,R(A+C)) */ int a = GETARG_A(i); int n = GETARG_C(i); int argc = (n == CALL_MAXARGS) ? -1 : n; int bidx = (argc < 0) ? 
a+2 : a+n+1; mrb_method_t m; struct RClass *c; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; mrb_sym mid = syms[GETARG_B(i)]; mrb_assert(bidx < ci->nregs); recv = regs[a]; if (GET_OPCODE(i) != OP_SENDB) { SET_NIL_VALUE(regs[bidx]); blk = regs[bidx]; } else { blk = regs[bidx]; if (!mrb_nil_p(blk) && mrb_type(blk) != MRB_TT_PROC) { blk = mrb_convert_type(mrb, blk, MRB_TT_PROC, "Proc", "to_proc"); /* The stack might have been reallocated during mrb_convert_type(), see #3622 */ regs[bidx] = blk; } } c = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m) || (missing == mrb->c->ci->mid && mrb_obj_eq(mrb, regs[0], recv))) { mrb_value args = (argc < 0) ? regs[a+1] : mrb_ary_new_from_values(mrb, n, regs+a+1); ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } if (argc >= 0) { if (a+2 >= irep->nregs) { stack_extend(mrb, a+3); } regs[a+1] = mrb_ary_new_from_values(mrb, n, regs+a+1); regs[a+2] = blk; argc = -1; } mrb_ary_unshift(mrb, regs[a+1], mrb_symbol_value(mid)); mid = missing; } /* push callinfo */ ci = cipush(mrb); ci->mid = mid; ci->stackent = mrb->c->stack; ci->target_class = c; ci->argc = argc; ci->pc = pc + 1; ci->acc = a; /* prepare stack */ mrb->c->stack += a; if (MRB_METHOD_CFUNC_P(m)) { ci->nregs = (argc < 0) ? 
3 : n+2; if (MRB_METHOD_PROC_P(m)) { struct RProc *p = MRB_METHOD_PROC(m); ci->proc = p; recv = p->body.func(mrb, recv); } else { recv = MRB_METHOD_FUNC(m)(mrb, recv); } mrb_gc_arena_restore(mrb, ai); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (GET_OPCODE(i) == OP_SENDB) { if (mrb_type(blk) == MRB_TT_PROC) { struct RProc *p = mrb_proc_ptr(blk); if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == ci[-1].env) { p->flags |= MRB_PROC_ORPHAN; } } } if (!ci->target_class) { /* return from context modifying method (resume/yield) */ if (ci->acc == CI_ACC_RESUMED) { mrb->jmp = prev_jmp; return recv; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->stack[0] = recv; /* pop stackpos */ mrb->c->stack = ci->stackent; pc = ci->pc; cipop(mrb); JUMP; } else { /* setup environment for calling method */ proc = ci->proc = MRB_METHOD_PROC(m); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, (argc < 0 && ci->nregs < 3) ? 3 : ci->nregs); pc = irep->iseq; JUMP; } } CASE(OP_FSEND) { /* A B C R(A) := fcall(R(A),Syms(B),R(A+1),... 
,R(A+C-1)) */ /* not implemented yet */ NEXT; } CASE(OP_CALL) { /* A R(A) := self.call(frame.argc, frame.argv) */ mrb_callinfo *ci; mrb_value recv = mrb->c->stack[0]; struct RProc *m = mrb_proc_ptr(recv); /* replace callinfo */ ci = mrb->c->ci; ci->target_class = MRB_PROC_TARGET_CLASS(m); ci->proc = m; if (MRB_PROC_ENV_P(m)) { mrb_sym mid; struct REnv *e = MRB_PROC_ENV(m); mid = e->mid; if (mid) ci->mid = mid; if (!e->stack) { e->stack = mrb->c->stack; } } /* prepare stack */ if (MRB_PROC_CFUNC_P(m)) { recv = MRB_PROC_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; /* pop stackpos */ ci = mrb->c->ci; mrb->c->stack = ci->stackent; regs[ci->acc] = recv; pc = ci->pc; cipop(mrb); irep = mrb->c->ci->proc->body.irep; pool = irep->pool; syms = irep->syms; JUMP; } else { /* setup environment for calling method */ proc = m; irep = m->body.irep; if (!irep) { mrb->c->stack[0] = mrb_nil_value(); goto L_RETURN; } pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, ci->nregs); if (ci->argc < 0) { if (irep->nregs > 3) { stack_clear(regs+3, irep->nregs-3); } } else if (ci->argc+2 < irep->nregs) { stack_clear(regs+ci->argc+2, irep->nregs-ci->argc-2); } if (MRB_PROC_ENV_P(m)) { regs[0] = MRB_PROC_ENV(m)->stack[0]; } pc = irep->iseq; JUMP; } } CASE(OP_SUPER) { /* A C R(A) := super(R(A+1),... ,R(A+C+1)) */ int a = GETARG_A(i); int n = GETARG_C(i); int argc = (n == CALL_MAXARGS) ? -1 : n; int bidx = (argc < 0) ? 
a+2 : a+n+1; mrb_method_t m; struct RClass *c; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; mrb_sym mid = ci->mid; struct RClass* target_class = MRB_PROC_TARGET_CLASS(ci->proc); mrb_assert(bidx < ci->nregs); if (mid == 0 || !target_class) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (target_class->tt == MRB_TT_MODULE) { target_class = ci->target_class; if (target_class->tt != MRB_TT_ICLASS) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_RUNTIME_ERROR, "superclass info lost [mruby limitations]"); mrb_exc_set(mrb, exc); goto L_RAISE; } } recv = regs[0]; if (!mrb_obj_is_kind_of(mrb, recv, target_class)) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context"); mrb_exc_set(mrb, exc); goto L_RAISE; } blk = regs[bidx]; if (!mrb_nil_p(blk) && mrb_type(blk) != MRB_TT_PROC) { blk = mrb_convert_type(mrb, blk, MRB_TT_PROC, "Proc", "to_proc"); /* The stack or ci stack might have been reallocated during mrb_convert_type(), see #3622 and #3784 */ regs[bidx] = blk; ci = mrb->c->ci; } c = target_class->super; m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); if (mid != missing) { c = mrb_class(mrb, recv); } m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m)) { mrb_value args = (argc < 0) ? 
regs[a+1] : mrb_ary_new_from_values(mrb, n, regs+a+1); ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } mid = missing; if (argc >= 0) { if (a+2 >= ci->nregs) { stack_extend(mrb, a+3); } regs[a+1] = mrb_ary_new_from_values(mrb, n, regs+a+1); regs[a+2] = blk; argc = -1; } mrb_ary_unshift(mrb, regs[a+1], mrb_symbol_value(ci->mid)); } /* push callinfo */ ci = cipush(mrb); ci->mid = mid; ci->stackent = mrb->c->stack; ci->target_class = c; ci->pc = pc + 1; ci->argc = argc; /* prepare stack */ mrb->c->stack += a; mrb->c->stack[0] = recv; if (MRB_METHOD_CFUNC_P(m)) { mrb_value v; ci->nregs = (argc < 0) ? 3 : n+2; if (MRB_METHOD_PROC_P(m)) { ci->proc = MRB_METHOD_PROC(m); } v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (!ci->target_class) { /* return from context modifying method (resume/yield) */ if (ci->acc == CI_ACC_RESUMED) { mrb->jmp = prev_jmp; return v; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->stack[0] = v; /* pop stackpos */ mrb->c->stack = ci->stackent; pc = ci->pc; cipop(mrb); JUMP; } else { /* fill callinfo */ ci->acc = a; /* setup environment for calling method */ proc = ci->proc = MRB_METHOD_PROC(m); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, (argc < 0 && ci->nregs < 3) ? 
3 : ci->nregs); pc = irep->iseq; JUMP; } } CASE(OP_ARGARY) { /* A Bx R(A) := argument array (16=6:1:5:4) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); int m1 = (bx>>10)&0x3f; int r = (bx>>9)&0x1; int m2 = (bx>>4)&0x1f; int lv = (bx>>0)&0xf; mrb_value *stack; if (mrb->c->ci->mid == 0 || mrb->c->ci->target_class == NULL) { mrb_value exc; L_NOSUPER: exc = mrb_exc_new_str_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e) goto L_NOSUPER; if (MRB_ENV_STACK_LEN(e) <= m1+r+m2+1) goto L_NOSUPER; stack = e->stack + 1; } if (r == 0) { regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack); } else { mrb_value *pp = NULL; struct RArray *rest; int len = 0; if (mrb_array_p(stack[m1])) { struct RArray *ary = mrb_ary_ptr(stack[m1]); pp = ARY_PTR(ary); len = (int)ARY_LEN(ary); } regs[a] = mrb_ary_new_capa(mrb, m1+len+m2); rest = mrb_ary_ptr(regs[a]); if (m1 > 0) { stack_copy(ARY_PTR(rest), stack, m1); } if (len > 0) { stack_copy(ARY_PTR(rest)+m1, pp, len); } if (m2 > 0) { stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2); } ARY_SET_LEN(rest, m1+len+m2); } regs[a+1] = stack[m1+r+m2]; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ENTER) { /* Ax arg setup according to flags (23=5:5:1:5:5:1:1) */ /* number of optional arguments times OP_JMP should follow */ mrb_aspec ax = GETARG_Ax(i); int m1 = MRB_ASPEC_REQ(ax); int o = MRB_ASPEC_OPT(ax); int r = MRB_ASPEC_REST(ax); int m2 = MRB_ASPEC_POST(ax); /* unused int k = MRB_ASPEC_KEY(ax); int kd = MRB_ASPEC_KDICT(ax); int b = MRB_ASPEC_BLOCK(ax); */ int argc = mrb->c->ci->argc; mrb_value *argv = regs+1; mrb_value *argv0 = argv; int len = m1 + o + r + m2; mrb_value *blk = &argv[argc < 0 ? 
1 : argc]; if (argc < 0) { struct RArray *ary = mrb_ary_ptr(regs[1]); argv = ARY_PTR(ary); argc = (int)ARY_LEN(ary); mrb_gc_protect(mrb, regs[1]); } if (mrb->c->ci->proc && MRB_PROC_STRICT_P(mrb->c->ci->proc)) { if (argc >= 0) { if (argc < m1 + m2 || (r == 0 && argc > len)) { argnum_error(mrb, m1+m2); goto L_RAISE; } } } else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) { mrb_gc_protect(mrb, argv[0]); argc = (int)RARRAY_LEN(argv[0]); argv = RARRAY_PTR(argv[0]); } if (argc < len) { int mlen = m2; if (argc < m1+m2) { if (m1 < argc) mlen = argc - m1; else mlen = 0; } regs[len+1] = *blk; /* move block */ SET_NIL_VALUE(regs[argc+1]); if (argv0 != argv) { value_move(&regs[1], argv, argc-mlen); /* m1 + o */ } if (argc < m1) { stack_clear(&regs[argc+1], m1-argc); } if (mlen) { value_move(&regs[len-m2+1], &argv[argc-mlen], mlen); } if (mlen < m2) { stack_clear(&regs[len-m2+mlen+1], m2-mlen); } if (r) { regs[m1+o+1] = mrb_ary_new_capa(mrb, 0); } if (o == 0 || argc < m1+m2) pc++; else pc += argc - m1 - m2 + 1; } else { int rnum = 0; if (argv0 != argv) { regs[len+1] = *blk; /* move block */ value_move(&regs[1], argv, m1+o); } if (r) { rnum = argc-m1-o-m2; regs[m1+o+1] = mrb_ary_new_from_values(mrb, rnum, argv+m1+o); } if (m2) { if (argc-m2 > m1) { value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2); } } if (argv0 == argv) { regs[len+1] = *blk; /* move block */ } pc += o + 1; } mrb->c->ci->argc = len; /* clear local (but non-argument) variables */ if (irep->nlocals-len-2 > 0) { stack_clear(&regs[len+2], irep->nlocals-len-2); } JUMP; } CASE(OP_KARG) { /* A B C R(A) := kdict[Syms(B)]; if C kdict.rm(Syms(B)) */ /* if C == 2; raise unless kdict.empty? 
*/ /* OP_JMP should follow to skip init code */ NEXT; } CASE(OP_KDICT) { /* A C R(A) := kdict */ NEXT; } L_RETURN: i = MKOP_AB(OP_RETURN, GETARG_A(i), OP_R_NORMAL); /* fall through */ CASE(OP_RETURN) { /* A B return R(A) (B=normal,in-block return/break) */ mrb_callinfo *ci; #define ecall_adjust() do {\ ptrdiff_t cioff = ci - mrb->c->cibase;\ ecall(mrb);\ ci = mrb->c->cibase + cioff;\ } while (0) ci = mrb->c->ci; if (ci->mid) { mrb_value blk; if (ci->argc < 0) { blk = regs[2]; } else { blk = regs[ci->argc+1]; } if (mrb_type(blk) == MRB_TT_PROC) { struct RProc *p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p) && ci > mrb->c->cibase && MRB_PROC_ENV(p) == ci[-1].env) { p->flags |= MRB_PROC_ORPHAN; } } } if (mrb->exc) { mrb_callinfo *ci0; L_RAISE: ci0 = ci = mrb->c->ci; if (ci == mrb->c->cibase) { if (ci->ridx == 0) goto L_FTOP; goto L_RESCUE; } while (ci[0].ridx == ci[-1].ridx) { cipop(mrb); mrb->c->stack = ci->stackent; if (ci->acc == CI_ACC_SKIP && prev_jmp) { mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } ci = mrb->c->ci; if (ci == mrb->c->cibase) { if (ci->ridx == 0) { L_FTOP: /* fiber top */ if (mrb->c == mrb->root_c) { mrb->c->stack = mrb->c->stbase; goto L_STOP; } else { struct mrb_context *c = mrb->c; while (c->eidx > ci->epos) { ecall_adjust(); } if (c->fib) { mrb_write_barrier(mrb, (struct RBasic*)c->fib); } mrb->c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; goto L_RAISE; } } break; } /* call ensure only when we skip this callinfo */ if (ci[0].ridx == ci[-1].ridx) { while (mrb->c->eidx > ci->epos) { ecall_adjust(); } } } L_RESCUE: if (ci->ridx == 0) goto L_STOP; proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; if (ci < ci0) { mrb->c->stack = ci[1].stackent; } stack_extend(mrb, irep->nregs); pc = mrb->c->rescue[--ci->ridx]; } else { int acc; mrb_value v; struct RProc *dst; ci = mrb->c->ci; v = regs[GETARG_A(i)]; mrb_gc_protect(mrb, v); switch (GETARG_B(i)) { case OP_R_RETURN: /* Fall through to OP_R_NORMAL 
otherwise */ if (ci->acc >=0 && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) { mrb_callinfo *cibase = mrb->c->cibase; dst = top_proc(mrb, proc); if (MRB_PROC_ENV_P(dst)) { struct REnv *e = MRB_PROC_ENV(dst); if (!MRB_ENV_STACK_SHARED_P(e) || e->cxt != mrb->c) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } } while (cibase <= ci && ci->proc != dst) { if (ci->acc < 0) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci--; } if (ci <= cibase) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } break; } case OP_R_NORMAL: NORMAL_RETURN: if (ci == mrb->c->cibase) { struct mrb_context *c; if (!mrb->c->prev) { /* toplevel return */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } if (mrb->c->prev->ci == mrb->c->prev->cibase) { mrb_value exc = mrb_exc_new_str_lit(mrb, E_FIBER_ERROR, "double resume"); mrb_exc_set(mrb, exc); goto L_RAISE; } while (mrb->c->eidx > 0) { ecall(mrb); } /* automatic yield at the end */ c = mrb->c; c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; mrb->c->status = MRB_FIBER_RUNNING; ci = mrb->c->ci; } break; case OP_R_BREAK: if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN; if (MRB_PROC_ORPHAN_P(proc)) { mrb_value exc; L_BREAK_ERROR: exc = mrb_exc_new_str_lit(mrb, E_LOCALJUMP_ERROR, "break from proc-closure"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_STACK_SHARED_P(MRB_PROC_ENV(proc))) { goto L_BREAK_ERROR; } else { struct REnv *e = MRB_PROC_ENV(proc); if (e == mrb->c->cibase->env && proc != mrb->c->cibase->proc) { goto L_BREAK_ERROR; } if (e->cxt != mrb->c) { goto L_BREAK_ERROR; } } while (mrb->c->eidx > mrb->c->ci->epos) { ecall_adjust(); } /* break from fiber block */ if (ci == mrb->c->cibase && ci->pc) { struct mrb_context *c = mrb->c; mrb->c = c->prev; c->prev = NULL; ci = mrb->c->ci; } if (ci->acc < 0) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->exc = (struct RObject*)break_new(mrb, proc, v); mrb->jmp = prev_jmp; 
MRB_THROW(prev_jmp); } if (FALSE) { L_BREAK: v = ((struct RBreak*)mrb->exc)->val; proc = ((struct RBreak*)mrb->exc)->proc; mrb->exc = NULL; ci = mrb->c->ci; } mrb->c->stack = ci->stackent; proc = proc->upper; while (mrb->c->cibase < ci && ci[-1].proc != proc) { if (ci[-1].acc == CI_ACC_SKIP) { while (ci < mrb->c->ci) { cipop(mrb); } goto L_BREAK_ERROR; } ci--; } if (ci == mrb->c->cibase) { goto L_BREAK_ERROR; } break; default: /* cannot happen */ break; } while (ci < mrb->c->ci) { cipop(mrb); } ci[0].ridx = ci[-1].ridx; while (mrb->c->eidx > ci->epos) { ecall_adjust(); } if (mrb->c->vmexec && !ci->target_class) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } acc = ci->acc; mrb->c->stack = ci->stackent; cipop(mrb); if (acc == CI_ACC_SKIP || acc == CI_ACC_DIRECT) { mrb_gc_arena_restore(mrb, ai); mrb->jmp = prev_jmp; return v; } pc = ci->pc; ci = mrb->c->ci; DEBUG(fprintf(stderr, "from :%s\n", mrb_sym2name(mrb, ci->mid))); proc = mrb->c->ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; regs[acc] = v; mrb_gc_arena_restore(mrb, ai); } JUMP; } CASE(OP_TAILCALL) { /* A B C return call(R(A),Syms(B),R(A+1),... 
,R(A+C+1)) */ int a = GETARG_A(i); int b = GETARG_B(i); int n = GETARG_C(i); mrb_method_t m; struct RClass *c; mrb_callinfo *ci; mrb_value recv; mrb_sym mid = syms[b]; recv = regs[a]; c = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &c, mid); if (MRB_METHOD_UNDEF_P(m)) { mrb_value sym = mrb_symbol_value(mid); mrb_sym missing = mrb_intern_lit(mrb, "method_missing"); m = mrb_method_search_vm(mrb, &c, missing); if (MRB_METHOD_UNDEF_P(m)) { mrb_value args; if (n == CALL_MAXARGS) { args = regs[a+1]; } else { args = mrb_ary_new_from_values(mrb, n, regs+a+1); } ERR_PC_SET(mrb, pc); mrb_method_missing(mrb, mid, recv, args); } mid = missing; if (n == CALL_MAXARGS) { mrb_ary_unshift(mrb, regs[a+1], sym); } else { value_move(regs+a+2, regs+a+1, ++n); regs[a+1] = sym; } } /* replace callinfo */ ci = mrb->c->ci; ci->mid = mid; ci->target_class = c; if (n == CALL_MAXARGS) { ci->argc = -1; } else { ci->argc = n; } /* move stack */ value_move(mrb->c->stack, &regs[a], ci->argc+1); if (MRB_METHOD_CFUNC_P(m)) { mrb_value v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb->c->stack[0] = v; mrb_gc_arena_restore(mrb, ai); goto L_RETURN; } else { /* setup environment for calling method */ struct RProc *p = MRB_METHOD_PROC(m); irep = p->body.irep; pool = irep->pool; syms = irep->syms; if (ci->argc < 0) { stack_extend(mrb, (irep->nregs < 3) ? 
3 : irep->nregs); } else { stack_extend(mrb, irep->nregs); } pc = irep->iseq; } JUMP; } CASE(OP_BLKPUSH) { /* A Bx R(A) := block (16=6:1:5:4) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); int m1 = (bx>>10)&0x3f; int r = (bx>>9)&0x1; int m2 = (bx>>4)&0x1f; int lv = (bx>>0)&0xf; mrb_value *stack; if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e || (!MRB_ENV_STACK_SHARED_P(e) && e->mid == 0) || MRB_ENV_STACK_LEN(e) <= m1+r+m2+1) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } stack = e->stack + 1; } if (mrb_nil_p(stack[m1+r+m2])) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } regs[a] = stack[m1+r+m2]; NEXT; } #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff)) #define OP_MATH_BODY(op,v1,v2) do {\ v1(regs[a]) = v1(regs[a]) op v2(regs[a+1]);\ } while(0) CASE(OP_ADD) { /* A B C R(A) := R(A)+R(A+1) (Syms[B]=:+,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; mrb_value *regs_a = regs + a; x = mrb_fixnum(regs_a[0]); y = mrb_fixnum(regs_a[1]); if (mrb_int_add_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs_a[0], (mrb_float)x + (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x + y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x + y); } #else OP_MATH_BODY(+,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x + y); } #else OP_MATH_BODY(+,mrb_float,mrb_float); #endif 
break; #endif case TYPES2(MRB_TT_STRING,MRB_TT_STRING): regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); break; default: goto L_SEND; } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_SUB) { /* A B C R(A) := R(A)-R(A+1) (Syms[B]=:-,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; x = mrb_fixnum(regs[a]); y = mrb_fixnum(regs[a+1]); if (mrb_int_sub_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x - (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x - y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x - y); } #else OP_MATH_BODY(-,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x - y); } #else OP_MATH_BODY(-,mrb_float,mrb_float); #endif break; #endif default: goto L_SEND; } NEXT; } CASE(OP_MUL) { /* A B C R(A) := R(A)*R(A+1) (Syms[B]=:*,C=1)*/ int a = GETARG_A(i); /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): { mrb_int x, y, z; x = mrb_fixnum(regs[a]); y = mrb_fixnum(regs[a+1]); if (mrb_int_mul_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x * (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef MRB_WITHOUT_FLOAT case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): { mrb_int x = mrb_fixnum(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x * 
y); } break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x * y); } #else OP_MATH_BODY(*,mrb_float,mrb_fixnum); #endif break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); mrb_float y = mrb_float(regs[a+1]); SET_FLOAT_VALUE(mrb, regs[a], x * y); } #else OP_MATH_BODY(*,mrb_float,mrb_float); #endif break; #endif default: goto L_SEND; } NEXT; } CASE(OP_DIV) { /* A B C R(A) := R(A)/R(A+1) (Syms[B]=:/,C=1)*/ int a = GETARG_A(i); #ifndef MRB_WITHOUT_FLOAT double x, y, f; #endif /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM): #ifdef MRB_WITHOUT_FLOAT { mrb_int x = mrb_fixnum(regs[a]); mrb_int y = mrb_fixnum(regs[a+1]); SET_INT_VALUE(regs[a], y ? x / y : 0); } break; #else x = (mrb_float)mrb_fixnum(regs[a]); y = (mrb_float)mrb_fixnum(regs[a+1]); break; case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT): x = (mrb_float)mrb_fixnum(regs[a]); y = mrb_float(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM): x = mrb_float(regs[a]); y = (mrb_float)mrb_fixnum(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): x = mrb_float(regs[a]); y = mrb_float(regs[a+1]); break; #endif default: goto L_SEND; } #ifndef MRB_WITHOUT_FLOAT if (y == 0) { if (x > 0) f = INFINITY; else if (x < 0) f = -INFINITY; else /* if (x == 0) */ f = NAN; } else { f = x / y; } SET_FLOAT_VALUE(mrb, regs[a], f); #endif NEXT; } CASE(OP_ADDI) { /* A B C R(A) := R(A)+C (Syms[B]=:+)*/ int a = GETARG_A(i); /* need to check if + is overridden */ switch (mrb_type(regs[a])) { case MRB_TT_FIXNUM: { mrb_int x = mrb_fixnum(regs[a]); mrb_int y = GETARG_C(i); mrb_int z; if (mrb_int_add_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs[a], (mrb_float)x + (mrb_float)y); break; #endif } SET_INT_VALUE(regs[a], z); } break; #ifndef 
MRB_WITHOUT_FLOAT case MRB_TT_FLOAT: #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); SET_FLOAT_VALUE(mrb, regs[a], x + GETARG_C(i)); } #else mrb_float(regs[a]) += GETARG_C(i); #endif break; #endif default: SET_INT_VALUE(regs[a+1], GETARG_C(i)); i = MKOP_ABC(OP_SEND, a, GETARG_B(i), 1); goto L_SEND; } NEXT; } CASE(OP_SUBI) { /* A B C R(A) := R(A)-C (Syms[B]=:-)*/ int a = GETARG_A(i); mrb_value *regs_a = regs + a; /* need to check if + is overridden */ switch (mrb_type(regs_a[0])) { case MRB_TT_FIXNUM: { mrb_int x = mrb_fixnum(regs_a[0]); mrb_int y = GETARG_C(i); mrb_int z; if (mrb_int_sub_overflow(x, y, &z)) { #ifndef MRB_WITHOUT_FLOAT SET_FLOAT_VALUE(mrb, regs_a[0], (mrb_float)x - (mrb_float)y); break; #endif } SET_INT_VALUE(regs_a[0], z); } break; #ifndef MRB_WITHOUT_FLOAT case MRB_TT_FLOAT: #ifdef MRB_WORD_BOXING { mrb_float x = mrb_float(regs[a]); SET_FLOAT_VALUE(mrb, regs[a], x - GETARG_C(i)); } #else mrb_float(regs_a[0]) -= GETARG_C(i); #endif break; #endif default: SET_INT_VALUE(regs_a[1], GETARG_C(i)); i = MKOP_ABC(OP_SEND, a, GETARG_B(i), 1); goto L_SEND; } NEXT; } #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1])) #ifdef MRB_WITHOUT_FLOAT #define OP_CMP(op) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ default:\ goto L_SEND;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #else #define OP_CMP(op) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FIXNUM,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FIXNUM):\ result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\ break;\ case 
TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_float,mrb_float);\ break;\ default:\ goto L_SEND;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #endif CASE(OP_EQ) { /* A B C R(A) := R(A)==R(A+1) (Syms[B]=:==,C=1)*/ int a = GETARG_A(i); if (mrb_obj_eq(mrb, regs[a], regs[a+1])) { SET_TRUE_VALUE(regs[a]); } else { OP_CMP(==); } NEXT; } CASE(OP_LT) { /* A B C R(A) := R(A)<R(A+1) (Syms[B]=:<,C=1)*/ int a = GETARG_A(i); OP_CMP(<); NEXT; } CASE(OP_LE) { /* A B C R(A) := R(A)<=R(A+1) (Syms[B]=:<=,C=1)*/ int a = GETARG_A(i); OP_CMP(<=); NEXT; } CASE(OP_GT) { /* A B C R(A) := R(A)>R(A+1) (Syms[B]=:>,C=1)*/ int a = GETARG_A(i); OP_CMP(>); NEXT; } CASE(OP_GE) { /* A B C R(A) := R(A)>=R(A+1) (Syms[B]=:>=,C=1)*/ int a = GETARG_A(i); OP_CMP(>=); NEXT; } CASE(OP_ARRAY) { /* A B C R(A) := ary_new(R(B),R(B+1)..R(B+C)) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value v = mrb_ary_new_from_values(mrb, c, &regs[b]); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYCAT) { /* A B mrb_ary_concat(R(A),R(B)) */ int a = GETARG_A(i); int b = GETARG_B(i); mrb_value splat = mrb_ary_splat(mrb, regs[b]); mrb_ary_concat(mrb, regs[a], splat); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYPUSH) { /* A B R(A).push(R(B)) */ int a = GETARG_A(i); int b = GETARG_B(i); mrb_ary_push(mrb, regs[a], regs[b]); NEXT; } CASE(OP_AREF) { /* A B C R(A) := R(B)[C] */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_value v = regs[b]; if (!mrb_array_p(v)) { if (c == 0) { regs[a] = v; } else { SET_NIL_VALUE(regs[a]); } } else { v = mrb_ary_ref(mrb, v, c); regs[a] = v; } NEXT; } CASE(OP_ASET) { /* A B C R(B)[C] := R(A) */ int a = GETARG_A(i); int b = GETARG_B(i); int c = GETARG_C(i); mrb_ary_set(mrb, regs[b], c, regs[a]); NEXT; } CASE(OP_APOST) { /* A B C *R(A),R(A+1)..R(A+C) := R(A) */ int a = GETARG_A(i); mrb_value v = regs[a]; int pre = GETARG_B(i); int post = GETARG_C(i); struct RArray 
*ary; int len, idx; if (!mrb_array_p(v)) { v = mrb_ary_new_from_values(mrb, 1, &regs[a]); } ary = mrb_ary_ptr(v); len = (int)ARY_LEN(ary); if (len > pre + post) { v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre); regs[a++] = v; while (post--) { regs[a++] = ARY_PTR(ary)[len-post-1]; } } else { v = mrb_ary_new_capa(mrb, 0); regs[a++] = v; for (idx=0; idx+pre<len; idx++) { regs[a+idx] = ARY_PTR(ary)[pre+idx]; } while (idx < post) { SET_NIL_VALUE(regs[a+idx]); idx++; } } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRING) { /* A Bx R(A) := str_new(Lit(Bx)) */ mrb_int a = GETARG_A(i); mrb_int bx = GETARG_Bx(i); mrb_value str = mrb_str_dup(mrb, pool[bx]); regs[a] = str; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRCAT) { /* A B R(A).concat(R(B)) */ mrb_int a = GETARG_A(i); mrb_int b = GETARG_B(i); mrb_str_concat(mrb, regs[a], regs[b]); NEXT; } CASE(OP_HASH) { /* A B C R(A) := hash_new(R(B),R(B+1)..R(B+C)) */ int b = GETARG_B(i); int c = GETARG_C(i); int lim = b+c*2; mrb_value hash = mrb_hash_new_capa(mrb, c); while (b < lim) { mrb_hash_set(mrb, hash, regs[b], regs[b+1]); b+=2; } regs[GETARG_A(i)] = hash; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_LAMBDA) { /* A b c R(A) := lambda(SEQ[b],c) (b:c = 14:2) */ struct RProc *p; int a = GETARG_A(i); int b = GETARG_b(i); int c = GETARG_c(i); mrb_irep *nirep = irep->reps[b]; if (c & OP_L_CAPTURE) { p = mrb_closure_new(mrb, nirep); } else { p = mrb_proc_new(mrb, nirep); p->flags |= MRB_PROC_SCOPE; } if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT; regs[a] = mrb_obj_value(p); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_OCLASS) { /* A R(A) := ::Object */ regs[GETARG_A(i)] = mrb_obj_value(mrb->object_class); NEXT; } CASE(OP_CLASS) { /* A B R(A) := newclass(R(A),Syms(B),R(A+1)) */ struct RClass *c = 0, *baseclass; int a = GETARG_A(i); mrb_value base, super; mrb_sym id = syms[GETARG_B(i)]; base = regs[a]; super = regs[a+1]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); 
base = mrb_obj_value(baseclass); } c = mrb_vm_define_class(mrb, base, super, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_MODULE) { /* A B R(A) := newmodule(R(A),Syms(B)) */ struct RClass *c = 0, *baseclass; int a = GETARG_A(i); mrb_value base; mrb_sym id = syms[GETARG_B(i)]; base = regs[a]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); base = mrb_obj_value(baseclass); } c = mrb_vm_define_module(mrb, base, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EXEC) { /* A Bx R(A) := blockexec(R(A),SEQ[Bx]) */ int a = GETARG_A(i); int bx = GETARG_Bx(i); mrb_callinfo *ci; mrb_value recv = regs[a]; struct RProc *p; mrb_irep *nirep = irep->reps[bx]; /* prepare closure */ p = mrb_proc_new(mrb, nirep); p->c = NULL; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc); MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv)); p->flags |= MRB_PROC_SCOPE; /* prepare call stack */ ci = cipush(mrb); ci->pc = pc + 1; ci->acc = a; ci->mid = 0; ci->stackent = mrb->c->stack; ci->argc = 0; ci->target_class = mrb_class_ptr(recv); /* prepare stack */ mrb->c->stack += a; /* setup block to call */ ci->proc = p; irep = p->body.irep; pool = irep->pool; syms = irep->syms; ci->nregs = irep->nregs; stack_extend(mrb, ci->nregs); stack_clear(regs+1, ci->nregs-1); pc = irep->iseq; JUMP; } CASE(OP_METHOD) { /* A B R(A).newmethod(Syms(B),R(A+1)) */ int a = GETARG_A(i); struct RClass *c = mrb_class_ptr(regs[a]); struct RProc *p = mrb_proc_ptr(regs[a+1]); mrb_method_t m; MRB_METHOD_FROM_PROC(m, p); mrb_define_method_raw(mrb, c, syms[GETARG_B(i)], m); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_SCLASS) { /* A B R(A) := R(B).singleton_class */ int a = GETARG_A(i); int b = GETARG_B(i); regs[a] = mrb_singleton_class(mrb, regs[b]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_TCLASS) { /* A R(A) := target_class */ if (!mrb->c->ci->target_class) { mrb_value exc = mrb_exc_new_str_lit(mrb, 
E_TYPE_ERROR, "no target class or module"); mrb_exc_set(mrb, exc); goto L_RAISE; } regs[GETARG_A(i)] = mrb_obj_value(mrb->c->ci->target_class); NEXT; } CASE(OP_RANGE) { /* A B C R(A) := range_new(R(B),R(B+1),C) */ int b = GETARG_B(i); mrb_value val = mrb_range_new(mrb, regs[b], regs[b+1], GETARG_C(i)); regs[GETARG_A(i)] = val; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_DEBUG) { /* A B C debug print R(A),R(B),R(C) */ #ifdef MRB_ENABLE_DEBUG_HOOK mrb->debug_op_hook(mrb, irep, pc, regs); #else #ifndef MRB_DISABLE_STDIO printf("OP_DEBUG %d %d %d\n", GETARG_A(i), GETARG_B(i), GETARG_C(i)); #else abort(); #endif #endif NEXT; } CASE(OP_STOP) { /* stop VM */ L_STOP: while (mrb->c->eidx > 0) { ecall(mrb); } ERR_PC_CLR(mrb); mrb->jmp = prev_jmp; if (mrb->exc) { return mrb_obj_value(mrb->exc); } return regs[irep->nlocals]; } CASE(OP_ERR) { /* Bx raise RuntimeError with message Lit(Bx) */ mrb_value msg = mrb_str_dup(mrb, pool[GETARG_Bx(i)]); mrb_value exc; if (GETARG_A(i) == 0) { exc = mrb_exc_new_str(mrb, E_RUNTIME_ERROR, msg); } else { exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg); } ERR_PC_SET(mrb, pc); mrb_exc_set(mrb, exc); goto L_RAISE; } } END_DISPATCH; #undef regs } MRB_CATCH(&c_jmp) { exc_catched = TRUE; goto RETRY_TRY_BLOCK; } MRB_END_EXC(&c_jmp); } MRB_API mrb_value mrb_run(mrb_state *mrb, struct RProc *proc, mrb_value self) { if (mrb->c->ci->argc < 0) { return mrb_vm_run(mrb, proc, self, 3); /* receiver, args and block) */ } else { return mrb_vm_run(mrb, proc, self, mrb->c->ci->argc + 2); /* argc + 2 (receiver and block) */ } } MRB_API mrb_value mrb_top_run(mrb_state *mrb, struct RProc *proc, mrb_value self, unsigned int stack_keep) { mrb_callinfo *ci; mrb_value v; if (!mrb->c->cibase) { return mrb_vm_run(mrb, proc, self, stack_keep); } if (mrb->c->ci == mrb->c->cibase) { return mrb_vm_run(mrb, proc, self, stack_keep); } ci = cipush(mrb); ci->mid = 0; ci->nregs = 1; /* protect the receiver */ ci->acc = CI_ACC_SKIP; ci->target_class = mrb->object_class; v = 
mrb_vm_run(mrb, proc, self, stack_keep); cipop(mrb); return v; } #if defined(MRB_ENABLE_CXX_EXCEPTION) && defined(__cplusplus) # if !defined(MRB_ENABLE_CXX_ABI) } /* end of extern "C" */ # endif mrb_int mrb_jmpbuf::jmpbuf_id = 0; # if !defined(MRB_ENABLE_CXX_ABI) extern "C" { # endif #endif
/* dataset artifact — end of concatenated sample ./CrossVul/dataset_final_sorted/CWE-416/c/bad_84_0; start of sample crossvul-cpp_data_good_3400_0 */
/* ** gc.c - garbage collector for mruby ** ** See Copyright Notice in mruby.h */ #include <string.h> #include <stdlib.h> #include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/data.h> #include <mruby/hash.h> #include <mruby/proc.h> #include <mruby/range.h> #include <mruby/string.h> #include <mruby/variable.h> #include <mruby/gc.h> #include <mruby/error.h> /* = Tri-color Incremental Garbage Collection mruby's GC is Tri-color Incremental GC with Mark & Sweep. Algorithm details are omitted. Instead, the implementation part is described below. == Object's Color Each object can be painted in three colors: * White - Unmarked. * Gray - Marked, But the child objects are unmarked. * Black - Marked, the child objects are also marked. == Two White Types There're two white color types in a flip-flop fashion: White-A and White-B, which respectively represent the Current White color (the newly allocated objects in the current GC cycle) and the Sweep Target White color (the dead objects to be swept). A and B will be switched just at the beginning of the next GC cycle. At that time, all the dead objects have been swept, while the newly created objects in the current GC cycle which finally remains White are now regarded as dead objects. Instead of traversing all the White-A objects and painting them as White-B, just switch the meaning of White-A and White-B as this will be much cheaper. As a result, the objects we sweep in the current GC cycle are always left from the previous GC cycle. This allows us to sweep objects incrementally, without the disturbance of the newly created objects. == Execution Timing GC Execution Time and Each step interval are decided by live objects count. List of Adjustment API: * gc_interval_ratio_set * gc_step_ratio_set For details, see the comments for each function. == Write Barrier mruby implementer and C extension library writer must insert a write barrier when updating a reference from a field of an object. 
When updating a reference from a field of object A to object B, two different types of write barrier are available: * mrb_field_write_barrier - target B object for a mark. * mrb_write_barrier - target A object for a mark. == Generational Mode mruby's GC offers an Generational Mode while re-using the tri-color GC infrastructure. It will treat the Black objects as Old objects after each sweep phase, instead of painting them White. The key ideas are still the same as traditional generational GC: * Minor GC - just traverse the Young objects (Gray objects) in the mark phase, then only sweep the newly created objects, and leave the Old objects live. * Major GC - same as a full regular GC cycle. The difference from "traditional" generational GC is, that the major GC in mruby is triggered incrementally in a tri-color manner. For details, see the comments for each function. */ struct free_obj { MRB_OBJECT_HEADER; struct RBasic *next; }; typedef struct { union { struct free_obj free; struct RBasic basic; struct RObject object; struct RClass klass; struct RString string; struct RArray array; struct RHash hash; struct RRange range; struct RData data; struct RProc proc; struct REnv env; struct RException exc; #ifdef MRB_WORD_BOXING struct RFloat floatv; struct RCptr cptr; #endif } as; } RVALUE; #ifdef GC_PROFILE #include <stdio.h> #include <sys/time.h> static double program_invoke_time = 0; static double gc_time = 0; static double gc_total_time = 0; static double gettimeofday_time(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + tv.tv_usec * 1e-6; } #define GC_INVOKE_TIME_REPORT(with) do {\ fprintf(stderr, "%s\n", with);\ fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\ fprintf(stderr, "is_generational: %d\n", is_generational(gc));\ fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\ } while(0) #define GC_TIME_START do {\ gc_time = gettimeofday_time();\ } while(0) #define GC_TIME_STOP_AND_REPORT do {\ gc_time = 
gettimeofday_time() - gc_time;\ gc_total_time += gc_time;\ fprintf(stderr, "gc_state: %d\n", gc->state);\ fprintf(stderr, "live: %zu\n", gc->live);\ fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\ fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\ fprintf(stderr, "gc_time: %30.20f\n", gc_time);\ fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\ } while(0) #else #define GC_INVOKE_TIME_REPORT(s) #define GC_TIME_START #define GC_TIME_STOP_AND_REPORT #endif #ifdef GC_DEBUG #define DEBUG(x) (x) #else #define DEBUG(x) #endif #ifndef MRB_HEAP_PAGE_SIZE #define MRB_HEAP_PAGE_SIZE 1024 #endif #define GC_STEP_SIZE 1024 /* white: 011, black: 100, gray: 000 */ #define GC_GRAY 0 #define GC_WHITE_A 1 #define GC_WHITE_B (1 << 1) #define GC_BLACK (1 << 2) #define GC_WHITES (GC_WHITE_A | GC_WHITE_B) #define GC_COLOR_MASK 7 #define paint_gray(o) ((o)->color = GC_GRAY) #define paint_black(o) ((o)->color = GC_BLACK) #define paint_white(o) ((o)->color = GC_WHITES) #define paint_partial_white(s, o) ((o)->color = (s)->current_white_part) #define is_gray(o) ((o)->color == GC_GRAY) #define is_white(o) ((o)->color & GC_WHITES) #define is_black(o) ((o)->color & GC_BLACK) #define flip_white_part(s) ((s)->current_white_part = other_white_part(s)) #define other_white_part(s) ((s)->current_white_part ^ GC_WHITES) #define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE) #define objects(p) ((RVALUE *)p->objects) MRB_API void* mrb_realloc_simple(mrb_state *mrb, void *p, size_t len) { void *p2; p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud); if (!p2 && len > 0 && mrb->gc.heaps) { mrb_full_gc(mrb); p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud); } return p2; } MRB_API void* mrb_realloc(mrb_state *mrb, void *p, size_t len) { void *p2; p2 = mrb_realloc_simple(mrb, p, len); if (!p2 && len) { if (mrb->gc.out_of_memory) { mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err)); /* mrb_panic(mrb); */ } else { 
mrb->gc.out_of_memory = TRUE; mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err)); } } else { mrb->gc.out_of_memory = FALSE; } return p2; } MRB_API void* mrb_malloc(mrb_state *mrb, size_t len) { return mrb_realloc(mrb, 0, len); } MRB_API void* mrb_malloc_simple(mrb_state *mrb, size_t len) { return mrb_realloc_simple(mrb, 0, len); } MRB_API void* mrb_calloc(mrb_state *mrb, size_t nelem, size_t len) { void *p; if (nelem > 0 && len > 0 && nelem <= SIZE_MAX / len) { size_t size; size = nelem * len; p = mrb_malloc(mrb, size); memset(p, 0, size); } else { p = NULL; } return p; } MRB_API void mrb_free(mrb_state *mrb, void *p) { (mrb->allocf)(mrb, p, 0, mrb->allocf_ud); } MRB_API mrb_bool mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) { return is_dead(&mrb->gc, object); } static void link_heap_page(mrb_gc *gc, mrb_heap_page *page) { page->next = gc->heaps; if (gc->heaps) gc->heaps->prev = page; gc->heaps = page; } static void unlink_heap_page(mrb_gc *gc, mrb_heap_page *page) { if (page->prev) page->prev->next = page->next; if (page->next) page->next->prev = page->prev; if (gc->heaps == page) gc->heaps = page->next; page->prev = NULL; page->next = NULL; } static void link_free_heap_page(mrb_gc *gc, mrb_heap_page *page) { page->free_next = gc->free_heaps; if (gc->free_heaps) { gc->free_heaps->free_prev = page; } gc->free_heaps = page; } static void unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page) { if (page->free_prev) page->free_prev->free_next = page->free_next; if (page->free_next) page->free_next->free_prev = page->free_prev; if (gc->free_heaps == page) gc->free_heaps = page->free_next; page->free_prev = NULL; page->free_next = NULL; } static void add_heap(mrb_state *mrb, mrb_gc *gc) { mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE)); RVALUE *p, *e; struct RBasic *prev = NULL; for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { p->as.free.tt = MRB_TT_FREE; p->as.free.next = prev; 
prev = &p->as.basic; } page->freelist = prev; link_heap_page(gc, page); link_free_heap_page(gc, page); } #define DEFAULT_GC_INTERVAL_RATIO 200 #define DEFAULT_GC_STEP_RATIO 200 #define DEFAULT_MAJOR_GC_INC_RATIO 200 #define is_generational(gc) ((gc)->generational) #define is_major_gc(gc) (is_generational(gc) && (gc)->full) #define is_minor_gc(gc) (is_generational(gc) && !(gc)->full) void mrb_gc_init(mrb_state *mrb, mrb_gc *gc) { #ifndef MRB_GC_FIXED_ARENA gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE); gc->arena_capa = MRB_GC_ARENA_SIZE; #endif gc->current_white_part = GC_WHITE_A; gc->heaps = NULL; gc->free_heaps = NULL; add_heap(mrb, gc); gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO; gc->step_ratio = DEFAULT_GC_STEP_RATIO; #ifndef MRB_GC_TURN_OFF_GENERATIONAL gc->generational = TRUE; gc->full = TRUE; #endif #ifdef GC_PROFILE program_invoke_time = gettimeofday_time(); #endif } static void obj_free(mrb_state *mrb, struct RBasic *obj, int end); void free_heap(mrb_state *mrb, mrb_gc *gc) { mrb_heap_page *page = gc->heaps; mrb_heap_page *tmp; RVALUE *p, *e; while (page) { tmp = page; page = page->next; for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { if (p->as.free.tt != MRB_TT_FREE) obj_free(mrb, &p->as.basic, TRUE); } mrb_free(mrb, tmp); } } void mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc) { free_heap(mrb, gc); #ifndef MRB_GC_FIXED_ARENA mrb_free(mrb, gc->arena); #endif } static void gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p) { #ifdef MRB_GC_FIXED_ARENA if (gc->arena_idx >= MRB_GC_ARENA_SIZE) { /* arena overflow error */ gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */ mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err)); } #else if (gc->arena_idx >= gc->arena_capa) { /* extend arena */ gc->arena_capa = (int)(gc->arena_capa * 1.5); gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa); } #endif gc->arena[gc->arena_idx++] = p; } /* mrb_gc_protect() 
leaves the object in the arena */ MRB_API void mrb_gc_protect(mrb_state *mrb, mrb_value obj) { if (mrb_immediate_p(obj)) return; gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj)); } #define GC_ROOT_NAME "_gc_root_" /* mrb_gc_register() keeps the object from GC. Register your object when it's exported to C world, without reference from Ruby world, e.g. callback arguments. Don't forget to remove the obejct using mrb_gc_unregister, otherwise your object will leak. */ MRB_API void mrb_gc_register(mrb_state *mrb, mrb_value obj) { mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME); mrb_value table = mrb_gv_get(mrb, root); if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) { table = mrb_ary_new(mrb); mrb_gv_set(mrb, root, table); } mrb_ary_push(mrb, table, obj); } /* mrb_gc_unregister() removes the object from GC root. */ MRB_API void mrb_gc_unregister(mrb_state *mrb, mrb_value obj) { mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME); mrb_value table = mrb_gv_get(mrb, root); struct RArray *a; mrb_int i; if (mrb_nil_p(table)) return; if (mrb_type(table) != MRB_TT_ARRAY) { mrb_gv_set(mrb, root, mrb_nil_value()); return; } a = mrb_ary_ptr(table); mrb_ary_modify(mrb, a); for (i = 0; i < a->len; i++) { if (mrb_obj_eq(mrb, a->ptr[i], obj)) { a->len--; memmove(&a->ptr[i], &a->ptr[i + 1], (a->len - i) * sizeof(a->ptr[i])); break; } } } MRB_API struct RBasic* mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls) { struct RBasic *p; static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } }; mrb_gc *gc = &mrb->gc; if (cls) { enum mrb_vtype tt; switch (cls->tt) { case MRB_TT_CLASS: case MRB_TT_SCLASS: case MRB_TT_MODULE: case MRB_TT_ENV: break; default: mrb_raise(mrb, E_TYPE_ERROR, "allocation failure"); } tt = MRB_INSTANCE_TT(cls); if (tt != MRB_TT_FALSE && ttype != MRB_TT_SCLASS && ttype != MRB_TT_ICLASS && ttype != MRB_TT_ENV && ttype != tt) { mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls)); } } #ifdef MRB_GC_STRESS mrb_full_gc(mrb); 
#endif if (gc->threshold < gc->live) { mrb_incremental_gc(mrb); } if (gc->free_heaps == NULL) { add_heap(mrb, gc); } p = gc->free_heaps->freelist; gc->free_heaps->freelist = ((struct free_obj*)p)->next; if (gc->free_heaps->freelist == NULL) { unlink_free_heap_page(gc, gc->free_heaps); } gc->live++; gc_protect(mrb, gc, p); *(RVALUE *)p = RVALUE_zero; p->tt = ttype; p->c = cls; paint_partial_white(gc, p); return p; } static inline void add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) { #ifdef MRB_GC_STRESS if (obj->tt > MRB_TT_MAXDEFINE) { abort(); } #endif paint_gray(obj); obj->gcnext = gc->gray_list; gc->gray_list = obj; } static void mark_context_stack(mrb_state *mrb, struct mrb_context *c) { size_t i; size_t e; mrb_value nil; if (c->stack == NULL) return; e = c->stack - c->stbase; if (c->ci) e += c->ci->nregs; if (c->stbase + e > c->stend) e = c->stend - c->stbase; for (i=0; i<e; i++) { mrb_value v = c->stbase[i]; if (!mrb_immediate_p(v)) { mrb_gc_mark(mrb, mrb_basic_ptr(v)); } } e = c->stend - c->stbase; nil = mrb_nil_value(); for (; i<e; i++) { c->stbase[i] = nil; } } static void mark_context(mrb_state *mrb, struct mrb_context *c) { int i; mrb_callinfo *ci; /* mark stack */ mark_context_stack(mrb, c); /* mark VM stack */ if (c->cibase) { for (ci = c->cibase; ci <= c->ci; ci++) { mrb_gc_mark(mrb, (struct RBasic*)ci->env); mrb_gc_mark(mrb, (struct RBasic*)ci->proc); mrb_gc_mark(mrb, (struct RBasic*)ci->target_class); } } /* mark ensure stack */ for (i=0; i<c->esize; i++) { if (c->ensure[i] == NULL) break; mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]); } /* mark fibers */ if (c->prev && c->prev->fib) { mrb_gc_mark(mrb, (struct RBasic*)c->prev->fib); } } static void gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) { mrb_assert(is_gray(obj)); paint_black(obj); gc->gray_list = obj->gcnext; mrb_gc_mark(mrb, (struct RBasic*)obj->c); switch (obj->tt) { case MRB_TT_ICLASS: { struct RClass *c = (struct RClass*)obj; if (MRB_FLAG_TEST(c, 
MRB_FLAG_IS_ORIGIN)) mrb_gc_mark_mt(mrb, c); mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super); } break; case MRB_TT_CLASS: case MRB_TT_MODULE: case MRB_TT_SCLASS: { struct RClass *c = (struct RClass*)obj; mrb_gc_mark_mt(mrb, c); mrb_gc_mark(mrb, (struct RBasic*)c->super); } /* fall through */ case MRB_TT_OBJECT: case MRB_TT_DATA: case MRB_TT_EXCEPTION: mrb_gc_mark_iv(mrb, (struct RObject*)obj); break; case MRB_TT_PROC: { struct RProc *p = (struct RProc*)obj; mrb_gc_mark(mrb, (struct RBasic*)p->env); mrb_gc_mark(mrb, (struct RBasic*)p->target_class); } break; case MRB_TT_ENV: { struct REnv *e = (struct REnv*)obj; mrb_int i, len; if MRB_ENV_STACK_SHARED_P(e) break; len = MRB_ENV_STACK_LEN(e); for (i=0; i<len; i++) { mrb_gc_mark_value(mrb, e->stack[i]); } } break; case MRB_TT_FIBER: { struct mrb_context *c = ((struct RFiber*)obj)->cxt; if (c) mark_context(mrb, c); } break; case MRB_TT_ARRAY: { struct RArray *a = (struct RArray*)obj; size_t i, e; for (i=0,e=a->len; i<e; i++) { mrb_gc_mark_value(mrb, a->ptr[i]); } } break; case MRB_TT_HASH: mrb_gc_mark_iv(mrb, (struct RObject*)obj); mrb_gc_mark_hash(mrb, (struct RHash*)obj); break; case MRB_TT_STRING: break; case MRB_TT_RANGE: { struct RRange *r = (struct RRange*)obj; if (r->edges) { mrb_gc_mark_value(mrb, r->edges->beg); mrb_gc_mark_value(mrb, r->edges->end); } } break; default: break; } } MRB_API void mrb_gc_mark(mrb_state *mrb, struct RBasic *obj) { if (obj == 0) return; if (!is_white(obj)) return; mrb_assert((obj)->tt != MRB_TT_FREE); add_gray_list(mrb, &mrb->gc, obj); } static void obj_free(mrb_state *mrb, struct RBasic *obj, int end) { DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt)); switch (obj->tt) { /* immediate - no mark */ case MRB_TT_TRUE: case MRB_TT_FIXNUM: case MRB_TT_SYMBOL: /* cannot happen */ return; case MRB_TT_FLOAT: #ifdef MRB_WORD_BOXING break; #else return; #endif case MRB_TT_OBJECT: mrb_gc_free_iv(mrb, (struct RObject*)obj); break; case MRB_TT_EXCEPTION: 
mrb_gc_free_iv(mrb, (struct RObject*)obj); if ((struct RObject*)obj == mrb->backtrace.exc) mrb->backtrace.exc = 0; break; case MRB_TT_CLASS: case MRB_TT_MODULE: case MRB_TT_SCLASS: mrb_gc_free_mt(mrb, (struct RClass*)obj); mrb_gc_free_iv(mrb, (struct RObject*)obj); break; case MRB_TT_ICLASS: if (MRB_FLAG_TEST(obj, MRB_FLAG_IS_ORIGIN)) mrb_gc_free_mt(mrb, (struct RClass*)obj); break; case MRB_TT_ENV: { struct REnv *e = (struct REnv*)obj; if (MRB_ENV_STACK_SHARED_P(e)) { /* cannot be freed */ return; } mrb_free(mrb, e->stack); e->stack = NULL; } break; case MRB_TT_FIBER: { struct mrb_context *c = ((struct RFiber*)obj)->cxt; if (!end && c && c != mrb->root_c) { mrb_callinfo *ci = c->ci; mrb_callinfo *ce = c->cibase; while (ce <= ci) { struct REnv *e = ci->env; if (e && !is_dead(&mrb->gc, e) && e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) { mrb_env_unshare(mrb, e); } ci--; } mrb_free_context(mrb, c); } } break; case MRB_TT_ARRAY: if (ARY_SHARED_P(obj)) mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared); else mrb_free(mrb, ((struct RArray*)obj)->ptr); break; case MRB_TT_HASH: mrb_gc_free_iv(mrb, (struct RObject*)obj); mrb_gc_free_hash(mrb, (struct RHash*)obj); break; case MRB_TT_STRING: mrb_gc_free_str(mrb, (struct RString*)obj); break; case MRB_TT_PROC: { struct RProc *p = (struct RProc*)obj; if (!MRB_PROC_CFUNC_P(p) && p->body.irep) { mrb_irep_decref(mrb, p->body.irep); } } break; case MRB_TT_RANGE: mrb_free(mrb, ((struct RRange*)obj)->edges); break; case MRB_TT_DATA: { struct RData *d = (struct RData*)obj; if (d->type && d->type->dfree) { d->type->dfree(mrb, d->data); } mrb_gc_free_iv(mrb, (struct RObject*)obj); } break; default: break; } obj->tt = MRB_TT_FREE; } static void root_scan_phase(mrb_state *mrb, mrb_gc *gc) { size_t i, e; if (!is_minor_gc(gc)) { gc->gray_list = NULL; gc->atomic_gray_list = NULL; } mrb_gc_mark_gv(mrb); /* mark arena */ for (i=0,e=gc->arena_idx; i<e; i++) { mrb_gc_mark(mrb, gc->arena[i]); } /* mark class hierarchy */ 
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class); /* mark built-in classes */ mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module); mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class); mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class); /* mark top_self */ mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self); /* mark exception */ mrb_gc_mark(mrb, (struct RBasic*)mrb->exc); /* mark backtrace */ mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.exc); e = (size_t)mrb->backtrace.n; for (i=0; i<e; i++) { mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.entries[i].klass); } /* mark pre-allocated exception */ mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err); mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err); #ifdef MRB_GC_FIXED_ARENA mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err); #endif mark_context(mrb, mrb->root_c); if (mrb->root_c->fib) { mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib); } if (mrb->root_c != mrb->c) { mark_context(mrb, mrb->c); } } static size_t gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) { size_t children = 0; gc_mark_children(mrb, gc, obj); switch (obj->tt) { case MRB_TT_ICLASS: children++; break; case MRB_TT_CLASS: case MRB_TT_SCLASS: case MRB_TT_MODULE: { struct RClass *c = (struct RClass*)obj; children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj); children += mrb_gc_mark_mt_size(mrb, c); children++; } 
break; case MRB_TT_OBJECT: case MRB_TT_DATA: case MRB_TT_EXCEPTION: children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj); break; case MRB_TT_ENV: children += (int)obj->flags; break; case MRB_TT_FIBER: { struct mrb_context *c = ((struct RFiber*)obj)->cxt; size_t i; mrb_callinfo *ci; if (!c) break; /* mark stack */ i = c->stack - c->stbase; if (c->ci) i += c->ci->nregs; if (c->stbase + i > c->stend) i = c->stend - c->stbase; children += i; /* mark ensure stack */ children += (c->ci) ? c->ci->eidx : 0; /* mark closure */ if (c->cibase) { for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++) ; } children += i; } break; case MRB_TT_ARRAY: { struct RArray *a = (struct RArray*)obj; children += a->len; } break; case MRB_TT_HASH: children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj); children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj); break; case MRB_TT_PROC: case MRB_TT_RANGE: children+=2; break; default: break; } return children; } static void gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) { while (gc->gray_list) { if (is_gray(gc->gray_list)) gc_mark_children(mrb, gc, gc->gray_list); else gc->gray_list = gc->gray_list->gcnext; } } static size_t incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit) { size_t tried_marks = 0; while (gc->gray_list && tried_marks < limit) { tried_marks += gc_gray_mark(mrb, gc, gc->gray_list); } return tried_marks; } static void final_marking_phase(mrb_state *mrb, mrb_gc *gc) { mark_context_stack(mrb, mrb->root_c); gc_mark_gray_list(mrb, gc); mrb_assert(gc->gray_list == NULL); gc->gray_list = gc->atomic_gray_list; gc->atomic_gray_list = NULL; gc_mark_gray_list(mrb, gc); mrb_assert(gc->gray_list == NULL); } static void prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc) { gc->state = MRB_GC_STATE_SWEEP; gc->sweeps = gc->heaps; gc->live_after_mark = gc->live; } static size_t incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit) { mrb_heap_page *page = gc->sweeps; size_t tried_sweep = 0; while (page && 
(tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            /* cell fully released: thread it onto this page's free list */
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            /* obj_free() did not turn the cell into MRB_TT_FREE, so the
               slot must still be treated as occupied */
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    /* NOTE(review): a page whose every slot was freed in this very pass
       (freed == MRB_HEAP_PAGE_SIZE) is deliberately NOT released here —
       confirm this is the intended recycling behavior. */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;
      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    } else {
      if (full && freed > 0) {
        /* page had no free cells before this sweep but has some now:
           put it back on the free-page list */
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;   /* completely full page: minor GC may skip it */
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;  /* remember where to resume on the next slice */
  return tried_sweep;
}

/* Run one slice of the incremental collector, bounded by `limit` units of
 * mark/sweep work.  Advances gc->state ROOT -> MARK -> SWEEP -> ROOT and
 * returns how much work was actually performed in this slice. */
static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    flip_white_part(gc);
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      /* gray list drained: do the atomic final mark, then start sweeping */
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
    size_t tried_sweep = 0;
    tried_sweep = incremental_sweep_phase(mrb, gc, limit);
    if (tried_sweep == 0)
      gc->state = MRB_GC_STATE_ROOT;  /* sweep exhausted: cycle complete */
    return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}

/* Drive incremental_gc() with an unbounded work budget until the collector
 * reaches `to_state`. */
static void
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
  do {
    incremental_gc(mrb, gc, SIZE_MAX);
  } while (gc->state != to_state);
}

static void
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
  size_t limit = 0, result = 0;

  /* Work budget for this slice scales with the user-tunable step_ratio
     (set via GC.step_ratio=, in percent of GC_STEP_SIZE). */
  limit = (GC_STEP_SIZE/100) * gc->step_ratio;
  while (result < limit) {
    result += incremental_gc(mrb, gc, limit);
    if (gc->state == MRB_GC_STATE_ROOT)
      break;  /* a full cycle just finished; stop this slice */
  }

  gc->threshold = gc->live + GC_STEP_SIZE;
}

/* Repaint every old (generational) object young again: finish any in-flight
 * major GC, then run one full non-generational sweep/mark cycle so all live
 * objects end up white.  Restores the original generational flag afterwards. */
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}

/* Run one increment of GC work (a full minor cycle in generational mode,
 * a bounded slice otherwise), and rebalance the thresholds whenever a
 * cycle completes.  No-op when GC is disabled. */
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    /* a cycle just completed: recompute the next trigger threshold */
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
      gc->full = FALSE;
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        /* too many old objects survived: demote them and schedule a major GC */
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}

/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked
GC cycle */ incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT); } incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT); gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio; if (is_generational(gc)) { gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; gc->full = FALSE; } GC_TIME_STOP_AND_REPORT; } MRB_API void mrb_garbage_collect(mrb_state *mrb) { mrb_full_gc(mrb); } MRB_API int mrb_gc_arena_save(mrb_state *mrb) { return mrb->gc.arena_idx; } MRB_API void mrb_gc_arena_restore(mrb_state *mrb, int idx) { mrb_gc *gc = &mrb->gc; #ifndef MRB_GC_FIXED_ARENA int capa = gc->arena_capa; if (idx < capa / 2) { capa = (int)(capa * 0.66); if (capa < MRB_GC_ARENA_SIZE) { capa = MRB_GC_ARENA_SIZE; } if (capa != gc->arena_capa) { gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa); gc->arena_capa = capa; } } #endif gc->arena_idx = idx; } /* * Field write barrier * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray). */ MRB_API void mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value) { mrb_gc *gc = &mrb->gc; if (!is_black(obj)) return; if (!is_white(value)) return; mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj))); mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT); if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) { add_gray_list(mrb, gc, value); } else { mrb_assert(gc->state == MRB_GC_STATE_SWEEP); paint_partial_white(gc, obj); /* for never write barriers */ } } /* * Write barrier * Paint obj(Black) to obj(Gray). * * The object that is painted gray will be traversed atomically in final * mark phase. So you use this write barrier if it's frequency written spot. * e.g. Set element on Array. 
 */
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  /* only a mutated black object can hide a white child from the marker */
  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  /* queue obj on the atomic gray list; it is re-traversed in the final
     (atomic) marking phase */
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}

/*
 *  call-seq:
 *     GC.start     -> nil
 *
 *  Initiates full garbage collection.
 *
 */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *    GC.disable   #=> false
 *    GC.enable    #=> true
 *    GC.enable    #=> false
 *
 */
static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = FALSE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.disable   -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *    GC.disable   #=> false
 *    GC.disable   #=> true
 *
 */
static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = TRUE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.interval_ratio   -> fixnum
 *
 *  Returns ratio of GC interval. Default value is 200(%).
 *
 */
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.interval_ratio);
}

/*
 *  call-seq:
 *     GC.interval_ratio = fixnum    -> nil
 *
 *  Updates ratio of GC interval. Default value is 200(%).
 *  GC start as soon as after end all step of GC if you set 100(%).
 *
 */
static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.interval_ratio = ratio;
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.step_ratio    -> fixnum
 *
 *  Returns step span ratio of Incremental GC. Default value is 200(%).
 *
 */
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.step_ratio);
}

/*
 *  call-seq:
 *     GC.step_ratio = fixnum   -> nil
 *
 *  Updates step span ratio of Incremental GC. Default value is 200(%).
 *  1 step of incrementalGC becomes long if a rate is big.
 *
 */
static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.step_ratio = ratio;
  return mrb_nil_value();
}

/* Switch between generational and plain incremental collection.
 * Leaving generational mode demotes all old objects first; entering it
 * finishes the current cycle and seeds the major-GC threshold. */
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (is_generational(gc) && !enable) {
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}

/*
 *  call-seq:
 *     GC.generational_mode -> true or false
 *
 *  Returns generational or normal gc mode.
 *
 */
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->gc.generational);
}

/*
 *  call-seq:
 *     GC.generational_mode = true or false -> true or false
 *
 *  Changes to generational or normal gc mode.
* */ static mrb_value gc_generational_mode_set(mrb_state *mrb, mrb_value self) { mrb_bool enable; mrb_get_args(mrb, "b", &enable); if (mrb->gc.generational != enable) change_gen_gc_mode(mrb, &mrb->gc, enable); return mrb_bool_value(enable); } static void gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data) { mrb_heap_page* page = gc->heaps; while (page != NULL) { RVALUE *p, *pend; p = objects(page); pend = p + MRB_HEAP_PAGE_SIZE; for (;p < pend; p++) { (*callback)(mrb, &p->as.basic, data); } page = page->next; } } void mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data) { gc_each_objects(mrb, &mrb->gc, callback, data); } #ifdef GC_TEST #ifdef GC_DEBUG static mrb_value gc_test(mrb_state *, mrb_value); #endif #endif void mrb_init_gc(mrb_state *mrb) { struct RClass *gc; gc = mrb_define_module(mrb, "GC"); mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE()); #ifdef GC_TEST #ifdef GC_DEBUG mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE()); #endif #endif } #ifdef GC_TEST #ifdef GC_DEBUG void test_mrb_field_write_barrier(void) { mrb_state *mrb = mrb_open(); struct RBasic *obj, *value; mrb_gc *gc = &mrb->gc; puts("test_mrb_field_write_barrier"); gc->generational = FALSE; obj = 
mrb_basic_ptr(mrb_ary_new(mrb)); value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value")); paint_black(obj); paint_partial_white(gc, value); puts(" in MRB_GC_STATE_MARK"); gc->state = MRB_GC_STATE_MARK; mrb_field_write_barrier(mrb, obj, value); mrb_assert(is_gray(value)); puts(" in MRB_GC_STATE_SWEEP"); paint_partial_white(gc, value); gc->state = MRB_GC_STATE_SWEEP; mrb_field_write_barrier(mrb, obj, value); mrb_assert(obj->color & gc->current_white_part); mrb_assert(value->color & gc->current_white_part); puts(" fail with black"); gc->state = MRB_GC_STATE_MARK; paint_white(obj); paint_partial_white(gc, value); mrb_field_write_barrier(mrb, obj, value); mrb_assert(obj->color & gc->current_white_part); puts(" fail with gray"); gc->state = MRB_GC_STATE_MARK; paint_black(obj); paint_gray(value); mrb_field_write_barrier(mrb, obj, value); mrb_assert(is_gray(value)); { puts("test_mrb_field_write_barrier_value"); obj = mrb_basic_ptr(mrb_ary_new(mrb)); mrb_value value = mrb_str_new_lit(mrb, "value"); paint_black(obj); paint_partial_white(gc, mrb_basic_ptr(value)); gc->state = MRB_GC_STATE_MARK; mrb_field_write_barrier_value(mrb, obj, value); mrb_assert(is_gray(mrb_basic_ptr(value))); } mrb_close(mrb); } void test_mrb_write_barrier(void) { mrb_state *mrb = mrb_open(); struct RBasic *obj; mrb_gc *gc = &mrb->gc; puts("test_mrb_write_barrier"); obj = mrb_basic_ptr(mrb_ary_new(mrb)); paint_black(obj); puts(" in MRB_GC_STATE_MARK"); gc->state = MRB_GC_STATE_MARK; mrb_write_barrier(mrb, obj); mrb_assert(is_gray(obj)); mrb_assert(gc->atomic_gray_list == obj); puts(" fail with gray"); paint_gray(obj); mrb_write_barrier(mrb, obj); mrb_assert(is_gray(obj)); mrb_close(mrb); } void test_add_gray_list(void) { mrb_state *mrb = mrb_open(); struct RBasic *obj1, *obj2; mrb_gc *gc = &mrb->gc; puts("test_add_gray_list"); change_gen_gc_mode(mrb, gc, FALSE); mrb_assert(gc->gray_list == NULL); obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test")); add_gray_list(mrb, gc, obj1); mrb_assert(gc->gray_list == 
obj1); mrb_assert(is_gray(obj1)); obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test")); add_gray_list(mrb, gc, obj2); mrb_assert(gc->gray_list == obj2); mrb_assert(gc->gray_list->gcnext == obj1); mrb_assert(is_gray(obj2)); mrb_close(mrb); } void test_gc_gray_mark(void) { mrb_state *mrb = mrb_open(); mrb_value obj_v, value_v; struct RBasic *obj; size_t gray_num = 0; mrb_gc *gc = &mrb->gc; puts("test_gc_gray_mark"); puts(" in MRB_TT_CLASS"); obj = (struct RBasic*)mrb->object_class; paint_gray(obj); gray_num = gc_gray_mark(mrb, gc, obj); mrb_assert(is_black(obj)); mrb_assert(gray_num > 1); puts(" in MRB_TT_ARRAY"); obj_v = mrb_ary_new(mrb); value_v = mrb_str_new_lit(mrb, "test"); paint_gray(mrb_basic_ptr(obj_v)); paint_partial_white(gc, mrb_basic_ptr(value_v)); mrb_ary_push(mrb, obj_v, value_v); gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v)); mrb_assert(is_black(mrb_basic_ptr(obj_v))); mrb_assert(is_gray(mrb_basic_ptr(value_v))); mrb_assert(gray_num == 1); mrb_close(mrb); } void test_incremental_gc(void) { mrb_state *mrb = mrb_open(); size_t max = ~0, live = 0, total = 0, freed = 0; RVALUE *free; mrb_heap_page *page; mrb_gc *gc = &mrb->gc; puts("test_incremental_gc"); change_gen_gc_mode(mrb, gc, FALSE); puts(" in mrb_full_gc"); mrb_full_gc(mrb); mrb_assert(gc->state == MRB_GC_STATE_ROOT); puts(" in MRB_GC_STATE_ROOT"); incremental_gc(mrb, gc, max); mrb_assert(gc->state == MRB_GC_STATE_MARK); puts(" in MRB_GC_STATE_MARK"); incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP); mrb_assert(gc->state == MRB_GC_STATE_SWEEP); puts(" in MRB_GC_STATE_SWEEP"); page = gc->heaps; while (page) { RVALUE *p = objects(page); RVALUE *e = p + MRB_HEAP_PAGE_SIZE; while (p<e) { if (is_black(&p->as.basic)) { live++; } if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) { printf("%p\n", &p->as.basic); } p++; } page = page->next; total += MRB_HEAP_PAGE_SIZE; } mrb_assert(gc->gray_list == NULL); incremental_gc(mrb, gc, max); mrb_assert(gc->state == MRB_GC_STATE_SWEEP); 
incremental_gc(mrb, gc, max); mrb_assert(gc->state == MRB_GC_STATE_ROOT); free = (RVALUE*)gc->heaps->freelist; while (free) { freed++; free = (RVALUE*)free->as.free.next; } mrb_assert(gc->live == live); mrb_assert(gc->live == total-freed); puts("test_incremental_gc(gen)"); incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP); change_gen_gc_mode(mrb, gc, TRUE); mrb_assert(gc->full == FALSE); mrb_assert(gc->state == MRB_GC_STATE_ROOT); puts(" in minor"); mrb_assert(is_minor_gc(gc)); mrb_assert(gc->majorgc_old_threshold > 0); gc->majorgc_old_threshold = 0; mrb_incremental_gc(mrb); mrb_assert(gc->full == TRUE); mrb_assert(gc->state == MRB_GC_STATE_ROOT); puts(" in major"); mrb_assert(is_major_gc(gc)); do { mrb_incremental_gc(mrb); } while (gc->state != MRB_GC_STATE_ROOT); mrb_assert(gc->full == FALSE); mrb_close(mrb); } void test_incremental_sweep_phase(void) { mrb_state *mrb = mrb_open(); mrb_gc *gc = &mrb->gc; puts("test_incremental_sweep_phase"); add_heap(mrb, gc); gc->sweeps = gc->heaps; mrb_assert(gc->heaps->next->next == NULL); mrb_assert(gc->free_heaps->next->next == NULL); incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3); mrb_assert(gc->heaps->next == NULL); mrb_assert(gc->heaps == gc->free_heaps); mrb_close(mrb); } static mrb_value gc_test(mrb_state *mrb, mrb_value self) { test_mrb_field_write_barrier(); test_mrb_write_barrier(); test_add_gray_list(); test_gc_gray_mark(); test_incremental_gc(); test_incremental_sweep_phase(); return mrb_nil_value(); } #endif /* GC_DEBUG */ #endif /* GC_TEST */
./CrossVul/dataset_final_sorted/CWE-416/c/good_3400_0
crossvul-cpp_data_good_1006_0
404: Not Found
./CrossVul/dataset_final_sorted/CWE-416/c/good_1006_0
crossvul-cpp_data_good_2440_0
/* * core.c -- Voltage/Current Regulator framework. * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * Copyright 2008 SlimLogic Ltd. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/async.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/regmap.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/module.h> #define CREATE_TRACE_POINTS #include <trace/events/regulator.h> #include "dummy.h" #include "internal.h" #define rdev_crit(rdev, fmt, ...) \ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_err(rdev, fmt, ...) \ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_warn(rdev, fmt, ...) \ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_info(rdev, fmt, ...) \ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_dbg(rdev, fmt, ...) \ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) static DEFINE_MUTEX(regulator_list_mutex); static LIST_HEAD(regulator_list); static LIST_HEAD(regulator_map_list); static LIST_HEAD(regulator_ena_gpio_list); static LIST_HEAD(regulator_supply_alias_list); static bool has_full_constraints; static struct dentry *debugfs_root; /* * struct regulator_map * * Used to provide symbolic supply names to devices. 
*/ struct regulator_map { struct list_head list; const char *dev_name; /* The dev_name() for the consumer */ const char *supply; struct regulator_dev *regulator; }; /* * struct regulator_enable_gpio * * Management for shared enable GPIO pin */ struct regulator_enable_gpio { struct list_head list; struct gpio_desc *gpiod; u32 enable_count; /* a number of enabled shared GPIO */ u32 request_count; /* a number of requested shared GPIO */ unsigned int ena_gpio_invert:1; }; /* * struct regulator_supply_alias * * Used to map lookups for a supply onto an alternative device. */ struct regulator_supply_alias { struct list_head list; struct device *src_dev; const char *src_supply; struct device *alias_dev; const char *alias_supply; }; static int _regulator_is_enabled(struct regulator_dev *rdev); static int _regulator_disable(struct regulator_dev *rdev); static int _regulator_get_voltage(struct regulator_dev *rdev); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); static const char *rdev_get_name(struct regulator_dev *rdev) { if (rdev->constraints && rdev->constraints->name) return rdev->constraints->name; else if (rdev->desc->name) return rdev->desc->name; else return ""; } static bool have_full_constraints(void) { return has_full_constraints || of_have_populated_dt(); } /** * of_get_regulator - get a regulator device node based on supply name * @dev: Device pointer for the consumer (of regulator) device * @supply: regulator supply name * * Extract the regulator device node corresponding to the supply name. * returns the device node corresponding to the regulator if found, else * returns NULL. 
*/ static struct device_node *of_get_regulator(struct device *dev, const char *supply) { struct device_node *regnode = NULL; char prop_name[32]; /* 32 is max size of property name */ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply); snprintf(prop_name, 32, "%s-supply", supply); regnode = of_parse_phandle(dev->of_node, prop_name, 0); if (!regnode) { dev_dbg(dev, "Looking up %s property in node %s failed", prop_name, dev->of_node->full_name); return NULL; } return regnode; } static int _regulator_can_change_status(struct regulator_dev *rdev) { if (!rdev->constraints) return 0; if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS) return 1; else return 0; } /* Platform voltage constraint check */ static int regulator_check_voltage(struct regulator_dev *rdev, int *min_uV, int *max_uV) { BUG_ON(*min_uV > *max_uV); if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { rdev_err(rdev, "operation not allowed\n"); return -EPERM; } if (*max_uV > rdev->constraints->max_uV) *max_uV = rdev->constraints->max_uV; if (*min_uV < rdev->constraints->min_uV) *min_uV = rdev->constraints->min_uV; if (*min_uV > *max_uV) { rdev_err(rdev, "unsupportable voltage range: %d-%duV\n", *min_uV, *max_uV); return -EINVAL; } return 0; } /* Make sure we select a voltage that suits the needs of all * regulator consumers */ static int regulator_check_consumers(struct regulator_dev *rdev, int *min_uV, int *max_uV) { struct regulator *regulator; list_for_each_entry(regulator, &rdev->consumer_list, list) { /* * Assume consumers that didn't say anything are OK * with anything in the constraint range. 
 */
		if (!regulator->min_uV && !regulator->max_uV)
			continue;

		/* narrow the requested window to this consumer's demands */
		if (*max_uV > regulator->max_uV)
			*max_uV = regulator->max_uV;
		if (*min_uV < regulator->min_uV)
			*min_uV = regulator->min_uV;
	}

	if (*min_uV > *max_uV) {
		/* consumer demands are mutually incompatible */
		rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
			*min_uV, *max_uV);
		return -EINVAL;
	}

	return 0;
}

/* current constraint check
 *
 * Clamp the requested [*min_uA, *max_uA] window into the machine
 * constraints; returns -ENODEV without constraints, -EPERM when current
 * changes are not permitted, -EINVAL when clamping empties the range.
 */
static int regulator_check_current_limit(struct regulator_dev *rdev,
					int *min_uA, int *max_uA)
{
	BUG_ON(*min_uA > *max_uA);

	if (!rdev->constraints) {
		rdev_err(rdev, "no constraints\n");
		return -ENODEV;
	}

	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
		rdev_err(rdev, "operation not allowed\n");
		return -EPERM;
	}

	if (*max_uA > rdev->constraints->max_uA)
		*max_uA = rdev->constraints->max_uA;
	if (*min_uA < rdev->constraints->min_uA)
		*min_uA = rdev->constraints->min_uA;

	if (*min_uA > *max_uA) {
		rdev_err(rdev, "unsupportable current range: %d-%duA\n",
			 *min_uA, *max_uA);
		return -EINVAL;
	}

	return 0;
}

/* operating mode constraint check
 *
 * Validates *mode and, if the machine constraints disallow it, degrades
 * *mode to a more power-hungry-but-permitted one (by halving the bitmask).
 */
static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
{
	switch (*mode) {
	case REGULATOR_MODE_FAST:
	case REGULATOR_MODE_NORMAL:
	case REGULATOR_MODE_IDLE:
	case REGULATOR_MODE_STANDBY:
		break;
	default:
		rdev_err(rdev, "invalid mode %x specified\n", *mode);
		return -EINVAL;
	}

	if (!rdev->constraints) {
		rdev_err(rdev, "no constraints\n");
		return -ENODEV;
	}
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
		rdev_err(rdev, "operation not allowed\n");
		return -EPERM;
	}

	/* The modes are bitmasks, the most power hungry modes having
	 * the lowest values. If the requested mode isn't supported
	 * try higher modes.
*/ while (*mode) { if (rdev->constraints->valid_modes_mask & *mode) return 0; *mode /= 2; } return -EINVAL; } /* dynamic regulator mode switching constraint check */ static int regulator_check_drms(struct regulator_dev *rdev) { if (!rdev->constraints) { rdev_err(rdev, "no constraints\n"); return -ENODEV; } if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { rdev_err(rdev, "operation not allowed\n"); return -EPERM; } return 0; } static ssize_t regulator_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); ssize_t ret; mutex_lock(&rdev->mutex); ret = sprintf(buf, "%d\n", _regulator_get_voltage(rdev)); mutex_unlock(&rdev->mutex); return ret; } static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL); static ssize_t regulator_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev)); } static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return sprintf(buf, "%s\n", rdev_get_name(rdev)); } static DEVICE_ATTR_RO(name); static ssize_t regulator_print_opmode(char *buf, int mode) { switch (mode) { case REGULATOR_MODE_FAST: return sprintf(buf, "fast\n"); case REGULATOR_MODE_NORMAL: return sprintf(buf, "normal\n"); case REGULATOR_MODE_IDLE: return sprintf(buf, "idle\n"); case REGULATOR_MODE_STANDBY: return sprintf(buf, "standby\n"); } return sprintf(buf, "unknown\n"); } static ssize_t regulator_opmode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); return regulator_print_opmode(buf, _regulator_get_mode(rdev)); } static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL); static ssize_t regulator_print_state(char *buf, int state) { if (state 
> 0) return sprintf(buf, "enabled\n"); else if (state == 0) return sprintf(buf, "disabled\n"); else return sprintf(buf, "unknown\n"); } static ssize_t regulator_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); ssize_t ret; mutex_lock(&rdev->mutex); ret = regulator_print_state(buf, _regulator_is_enabled(rdev)); mutex_unlock(&rdev->mutex); return ret; } static DEVICE_ATTR(state, 0444, regulator_state_show, NULL); static ssize_t regulator_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); int status; char *label; status = rdev->desc->ops->get_status(rdev); if (status < 0) return status; switch (status) { case REGULATOR_STATUS_OFF: label = "off"; break; case REGULATOR_STATUS_ON: label = "on"; break; case REGULATOR_STATUS_ERROR: label = "error"; break; case REGULATOR_STATUS_FAST: label = "fast"; break; case REGULATOR_STATUS_NORMAL: label = "normal"; break; case REGULATOR_STATUS_IDLE: label = "idle"; break; case REGULATOR_STATUS_STANDBY: label = "standby"; break; case REGULATOR_STATUS_BYPASS: label = "bypass"; break; case REGULATOR_STATUS_UNDEFINED: label = "undefined"; break; default: return -ERANGE; } return sprintf(buf, "%s\n", label); } static DEVICE_ATTR(status, 0444, regulator_status_show, NULL); static ssize_t regulator_min_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", rdev->constraints->min_uA); } static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL); static ssize_t regulator_max_uA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct regulator_dev *rdev = dev_get_drvdata(dev); if (!rdev->constraints) return sprintf(buf, "constraint not defined\n"); return sprintf(buf, "%d\n", 
		rdev->constraints->max_uA);
}
static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);

/* sysfs: machine constraint minimum voltage, in microvolts */
static ssize_t regulator_min_uV_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->min_uV);
}
static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);

/* sysfs: machine constraint maximum voltage, in microvolts */
static ssize_t regulator_max_uV_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->max_uV);
}
static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);

/*
 * sysfs: total load requested by all consumers of this regulator, in uA.
 * The consumer list is walked under the rdev mutex.
 */
static ssize_t regulator_total_uA_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	struct regulator *regulator;
	int uA = 0;

	mutex_lock(&rdev->mutex);
	list_for_each_entry(regulator, &rdev->consumer_list, list)
		uA += regulator->uA_load;
	mutex_unlock(&rdev->mutex);
	return sprintf(buf, "%d\n", uA);
}
static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);

/* sysfs: current enable reference count of the regulator */
static ssize_t num_users_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", rdev->use_count);
}
static DEVICE_ATTR_RO(num_users);

/* sysfs: regulator type, "voltage" or "current" */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	switch (rdev->desc->type) {
	case REGULATOR_VOLTAGE:
		return sprintf(buf, "voltage\n");
	case REGULATOR_CURRENT:
		return sprintf(buf, "current\n");
	}
	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(type);

/* sysfs: configured voltage for the suspend-to-mem state, in uV */
static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
}
static DEVICE_ATTR(suspend_mem_microvolts, 0444,
		regulator_suspend_mem_uV_show, NULL);

/* sysfs: configured voltage for the suspend-to-disk state, in uV */
static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
}
static DEVICE_ATTR(suspend_disk_microvolts, 0444,
		regulator_suspend_disk_uV_show, NULL);

/* sysfs: configured voltage for the standby state, in uV */
static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
}
static DEVICE_ATTR(suspend_standby_microvolts, 0444,
		regulator_suspend_standby_uV_show, NULL);

/* sysfs: operating mode configured for the suspend-to-mem state */
static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_mem.mode);
}
static DEVICE_ATTR(suspend_mem_mode, 0444,
		regulator_suspend_mem_mode_show, NULL);

/* sysfs: operating mode configured for the suspend-to-disk state */
static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_disk.mode);
}
static DEVICE_ATTR(suspend_disk_mode, 0444,
		regulator_suspend_disk_mode_show, NULL);

/* sysfs: operating mode configured for the standby state */
static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_standby.mode);
}
static DEVICE_ATTR(suspend_standby_mode, 0444,
		regulator_suspend_standby_mode_show, NULL);

/* sysfs: enabled/disabled state configured for suspend-to-mem */
static ssize_t regulator_suspend_mem_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_mem.enabled);
}
static DEVICE_ATTR(suspend_mem_state, 0444,
		regulator_suspend_mem_state_show, NULL);

/* sysfs: enabled/disabled state configured for suspend-to-disk */
static ssize_t regulator_suspend_disk_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_disk.enabled);
}
static DEVICE_ATTR(suspend_disk_state, 0444,
		regulator_suspend_disk_state_show, NULL);

/* sysfs: enabled/disabled state configured for standby */
static ssize_t regulator_suspend_standby_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_standby.enabled);
}
static DEVICE_ATTR(suspend_standby_state, 0444,
		regulator_suspend_standby_state_show, NULL);

/*
 * sysfs: hardware bypass state as reported by the driver's get_bypass()
 * op; "unknown" if the query fails.
 */
static ssize_t regulator_bypass_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	const char *report;
	bool bypass;
	int ret;

	ret = rdev->desc->ops->get_bypass(rdev, &bypass);

	if (ret != 0)
		report = "unknown";
	else if (bypass)
		report = "enabled";
	else
		report = "disabled";

	return sprintf(buf, "%s\n", report);
}
static DEVICE_ATTR(bypass, 0444,
		   regulator_bypass_show, NULL);

/*
 * These are the only attributes that are present for all regulators.
 * Other attributes are a function of regulator functionality.
 */
static struct attribute *regulator_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_num_users.attr,
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(regulator_dev);

/* device release callback: frees the rdev allocated at registration */
static void regulator_dev_release(struct device *dev)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	kfree(rdev);
}

static struct class regulator_class = {
	.name = "regulator",
	.dev_release = regulator_dev_release,
	.dev_groups = regulator_dev_groups,
};

/* Calculate the new optimum regulator operating mode based on the new total
 * consumer load.
All locks held by caller */ static void drms_uA_update(struct regulator_dev *rdev) { struct regulator *sibling; int current_uA = 0, output_uV, input_uV, err; unsigned int mode; err = regulator_check_drms(rdev); if (err < 0 || !rdev->desc->ops->get_optimum_mode || (!rdev->desc->ops->get_voltage && !rdev->desc->ops->get_voltage_sel) || !rdev->desc->ops->set_mode) return; /* get output voltage */ output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) return; /* get input voltage */ input_uV = 0; if (rdev->supply) input_uV = regulator_get_voltage(rdev->supply); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) return; /* calc total requested load */ list_for_each_entry(sibling, &rdev->consumer_list, list) current_uA += sibling->uA_load; /* now get the optimum mode for our new total regulator load */ mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, output_uV, current_uA); /* check the new mode is allowed */ err = regulator_mode_constrain(rdev, &mode); if (err == 0) rdev->desc->ops->set_mode(rdev, mode); } static int suspend_set_state(struct regulator_dev *rdev, struct regulator_state *rstate) { int ret = 0; /* If we have no suspend mode configration don't set anything; * only warn if the driver implements set_suspend_voltage or * set_suspend_mode callback. 
*/ if (!rstate->enabled && !rstate->disabled) { if (rdev->desc->ops->set_suspend_voltage || rdev->desc->ops->set_suspend_mode) rdev_warn(rdev, "No configuration\n"); return 0; } if (rstate->enabled && rstate->disabled) { rdev_err(rdev, "invalid configuration\n"); return -EINVAL; } if (rstate->enabled && rdev->desc->ops->set_suspend_enable) ret = rdev->desc->ops->set_suspend_enable(rdev); else if (rstate->disabled && rdev->desc->ops->set_suspend_disable) ret = rdev->desc->ops->set_suspend_disable(rdev); else /* OK if set_suspend_enable or set_suspend_disable is NULL */ ret = 0; if (ret < 0) { rdev_err(rdev, "failed to enabled/disable\n"); return ret; } if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) { ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV); if (ret < 0) { rdev_err(rdev, "failed to set voltage\n"); return ret; } } if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) { ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode); if (ret < 0) { rdev_err(rdev, "failed to set mode\n"); return ret; } } return ret; } /* locks held by caller */ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) { if (!rdev->constraints) return -EINVAL; switch (state) { case PM_SUSPEND_STANDBY: return suspend_set_state(rdev, &rdev->constraints->state_standby); case PM_SUSPEND_MEM: return suspend_set_state(rdev, &rdev->constraints->state_mem); case PM_SUSPEND_MAX: return suspend_set_state(rdev, &rdev->constraints->state_disk); default: return -EINVAL; } } static void print_constraints(struct regulator_dev *rdev) { struct regulation_constraints *constraints = rdev->constraints; char buf[80] = ""; int count = 0; int ret; if (constraints->min_uV && constraints->max_uV) { if (constraints->min_uV == constraints->max_uV) count += sprintf(buf + count, "%d mV ", constraints->min_uV / 1000); else count += sprintf(buf + count, "%d <--> %d mV ", constraints->min_uV / 1000, constraints->max_uV / 1000); } if (!constraints->min_uV || 
constraints->min_uV != constraints->max_uV) { ret = _regulator_get_voltage(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mV ", ret / 1000); } if (constraints->uV_offset) count += sprintf(buf, "%dmV offset ", constraints->uV_offset / 1000); if (constraints->min_uA && constraints->max_uA) { if (constraints->min_uA == constraints->max_uA) count += sprintf(buf + count, "%d mA ", constraints->min_uA / 1000); else count += sprintf(buf + count, "%d <--> %d mA ", constraints->min_uA / 1000, constraints->max_uA / 1000); } if (!constraints->min_uA || constraints->min_uA != constraints->max_uA) { ret = _regulator_get_current_limit(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mA ", ret / 1000); } if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) count += sprintf(buf + count, "fast "); if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) count += sprintf(buf + count, "normal "); if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE) count += sprintf(buf + count, "idle "); if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += sprintf(buf + count, "standby"); if (!count) sprintf(buf, "no parameters"); rdev_dbg(rdev, "%s\n", buf); if ((constraints->min_uV != constraints->max_uV) && !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) rdev_warn(rdev, "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n"); } static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { const struct regulator_ops *ops = rdev->desc->ops; int ret; /* do we need to apply the constraint voltage */ if (rdev->constraints->apply_uV && rdev->constraints->min_uV == rdev->constraints->max_uV) { int current_uV = _regulator_get_voltage(rdev); if (current_uV < 0) { rdev_err(rdev, "failed to get the current voltage(%d)\n", current_uV); return current_uV; } if (current_uV < rdev->constraints->min_uV || current_uV > rdev->constraints->max_uV) { ret = _regulator_do_set_voltage( rdev, rdev->constraints->min_uV, 
				rdev->constraints->max_uV);
			if (ret < 0) {
				rdev_err(rdev,
					"failed to apply %duV constraint(%d)\n",
					rdev->constraints->min_uV, ret);
				return ret;
			}
		}
	}

	/* constrain machine-level voltage specs to fit
	 * the actual range supported by this regulator.
	 */
	if (ops->list_voltage && rdev->desc->n_voltages) {
		int	count = rdev->desc->n_voltages;
		int	i;
		int	min_uV = INT_MAX;
		int	max_uV = INT_MIN;
		int	cmin = constraints->min_uV;
		int	cmax = constraints->max_uV;

		/* it's safe to autoconfigure fixed-voltage supplies
		   and the constraints are used by list_voltage. */
		if (count == 1 && !cmin) {
			cmin = 1;
			cmax = INT_MAX;
			constraints->min_uV = cmin;
			constraints->max_uV = cmax;
		}

		/* voltage constraints are optional */
		if ((cmin == 0) && (cmax == 0))
			return 0;

		/* else require explicit machine-level constraints */
		if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
			rdev_err(rdev, "invalid voltage constraints\n");
			return -EINVAL;
		}

		/* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
		for (i = 0; i < count; i++) {
			int	value;

			value = ops->list_voltage(rdev, i);
			if (value <= 0)
				continue;

			/* maybe adjust [min_uV..max_uV] */
			if (value >= cmin && value < min_uV)
				min_uV = value;
			if (value <= cmax && value > max_uV)
				max_uV = value;
		}

		/* final: [min_uV..max_uV] valid iff constraints valid */
		if (max_uV < min_uV) {
			rdev_err(rdev,
				 "unsupportable voltage constraints %u-%uuV\n",
				 min_uV, max_uV);
			return -EINVAL;
		}

		/* use regulator's subset of machine constraints */
		if (constraints->min_uV < min_uV) {
			rdev_dbg(rdev, "override min_uV, %d -> %d\n",
				 constraints->min_uV, min_uV);
			constraints->min_uV = min_uV;
		}
		if (constraints->max_uV > max_uV) {
			rdev_dbg(rdev, "override max_uV, %d -> %d\n",
				 constraints->max_uV, max_uV);
			constraints->max_uV = max_uV;
		}
	}

	return 0;
}

/*
 * Apply machine-level current limits via the driver's
 * set_current_limit op; a no-op when no current constraints are given.
 */
static int machine_constraints_current(struct regulator_dev *rdev,
	struct regulation_constraints *constraints)
{
	const struct regulator_ops *ops = rdev->desc->ops;
	int ret;

	if (!constraints->min_uA && !constraints->max_uA)
		return 0;

	if
	    (constraints->min_uA > constraints->max_uA) {
		rdev_err(rdev, "Invalid current constraints\n");
		return -EINVAL;
	}

	if (!ops->set_current_limit || !ops->get_current_limit) {
		rdev_warn(rdev, "Operation of current configuration missing\n");
		return 0;
	}

	/* Set regulator current in constraints range */
	ret = ops->set_current_limit(rdev, constraints->min_uA,
			constraints->max_uA);
	if (ret < 0) {
		rdev_err(rdev, "Failed to set current constraint, %d\n", ret);
		return ret;
	}

	return 0;
}

static int _regulator_do_enable(struct regulator_dev *rdev);

/**
 * set_machine_constraints - sets regulator constraints
 * @rdev: regulator source
 * @constraints: constraints to apply
 *
 * Allows platform initialisation code to define and constrain
 * regulator circuits e.g. valid voltage/current ranges, etc.  NOTE:
 * Constraints *must* be set by platform code in order for some
 * regulator operations to proceed i.e. set_voltage, set_current_limit,
 * set_mode.
 */
static int set_machine_constraints(struct regulator_dev *rdev,
	const struct regulation_constraints *constraints)
{
	int ret = 0;
	const struct regulator_ops *ops = rdev->desc->ops;

	/* a private copy is kept; empty constraints if none were given */
	if (constraints)
		rdev->constraints = kmemdup(constraints, sizeof(*constraints),
					    GFP_KERNEL);
	else
		rdev->constraints = kzalloc(sizeof(*constraints),
					    GFP_KERNEL);
	if (!rdev->constraints)
		return -ENOMEM;

	ret = machine_constraints_voltage(rdev, rdev->constraints);
	if (ret != 0)
		goto out;

	ret = machine_constraints_current(rdev, rdev->constraints);
	if (ret != 0)
		goto out;

	/* do we need to setup our suspend state */
	if (rdev->constraints->initial_state) {
		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
		if (ret < 0) {
			rdev_err(rdev, "failed to set suspend state\n");
			goto out;
		}
	}

	if (rdev->constraints->initial_mode) {
		if (!ops->set_mode) {
			rdev_err(rdev, "no set_mode operation\n");
			ret = -EINVAL;
			goto out;
		}

		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
		if (ret < 0) {
			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
			goto out;
		}
	}

	/* If the constraints say the regulator should be on at this point
	 * and we have control then make sure it is enabled.
	 */
	if (rdev->constraints->always_on || rdev->constraints->boot_on) {
		ret = _regulator_do_enable(rdev);
		if (ret < 0 && ret != -EINVAL) {
			rdev_err(rdev, "failed to enable\n");
			goto out;
		}
	}

	if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
		&& ops->set_ramp_delay) {
		ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
		if (ret < 0) {
			rdev_err(rdev, "failed to set ramp_delay\n");
			goto out;
		}
	}

	print_constraints(rdev);
	return 0;
out:
	/* on any failure the constraints are discarded again */
	kfree(rdev->constraints);
	rdev->constraints = NULL;
	return ret;
}

/**
 * set_supply - set regulator supply regulator
 * @rdev: regulator name
 * @supply_rdev: supply regulator name
 *
 * Called by platform initialisation code to set the supply regulator for this
 * regulator. This ensures that a regulators supply will also be enabled by the
 * core if it's child is enabled.
 */
static int set_supply(struct regulator_dev *rdev,
		      struct regulator_dev *supply_rdev)
{
	int err;

	rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));

	/* register ourselves as an internal consumer of the supply */
	rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
	if (rdev->supply == NULL) {
		err = -ENOMEM;
		return err;
	}
	supply_rdev->open_count++;

	return 0;
}

/**
 * set_consumer_device_supply - Bind a regulator to a symbolic supply
 * @rdev:         regulator source
 * @consumer_dev_name: dev_name() string for device supply applies to
 * @supply:       symbolic name for supply
 *
 * Allows platform initialisation code to map physical regulator
 * sources to symbolic names for supplies for use by devices.  Devices
 * should use these symbolic names to request regulators, avoiding the
 * need to provide board-specific regulator names as platform data.
 */
static int set_consumer_device_supply(struct regulator_dev *rdev,
				      const char *consumer_dev_name,
				      const char *supply)
{
	struct regulator_map *node;
	int has_dev;

	if (supply == NULL)
		return -EINVAL;

	if (consumer_dev_name != NULL)
		has_dev = 1;
	else
		has_dev = 0;

	/* reject duplicate (dev_name, supply) mappings */
	list_for_each_entry(node, &regulator_map_list, list) {
		if (node->dev_name && consumer_dev_name) {
			if (strcmp(node->dev_name, consumer_dev_name) != 0)
				continue;
		} else if (node->dev_name || consumer_dev_name) {
			continue;
		}

		if (strcmp(node->supply, supply) != 0)
			continue;

		pr_debug("%s: %s/%s is '%s' supply; fail %s/%s\n",
			 consumer_dev_name,
			 dev_name(&node->regulator->dev),
			 node->regulator->desc->name,
			 supply,
			 dev_name(&rdev->dev), rdev_get_name(rdev));
		return -EBUSY;
	}

	node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
	if (node == NULL)
		return -ENOMEM;

	node->regulator = rdev;
	node->supply = supply;

	if (has_dev) {
		node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
		if (node->dev_name == NULL) {
			kfree(node);
			return -ENOMEM;
		}
	}

	list_add(&node->list, &regulator_map_list);
	return 0;
}

/* drop every supply mapping that points at the given regulator */
static void unset_regulator_supplies(struct regulator_dev *rdev)
{
	struct regulator_map *node, *n;

	list_for_each_entry_safe(node, n, &regulator_map_list, list) {
		if (rdev == node->regulator) {
			list_del(&node->list);
			kfree(node->dev_name);
			kfree(node);
		}
	}
}

#define REG_STR_SIZE	64

/*
 * Allocate and register a consumer handle on @rdev for @dev, creating
 * the sysfs link and debugfs entries.  Returns NULL on failure; takes
 * and releases the rdev mutex internally.
 */
static struct regulator *create_regulator(struct regulator_dev *rdev,
					  struct device *dev,
					  const char *supply_name)
{
	struct regulator *regulator;
	char buf[REG_STR_SIZE];
	int err, size;

	regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
	if (regulator == NULL)
		return NULL;

	mutex_lock(&rdev->mutex);
	regulator->rdev = rdev;
	list_add(&regulator->list, &rdev->consumer_list);

	if (dev) {
		regulator->dev = dev;

		/* Add a link to the device sysfs entry */
		size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
				 dev->kobj.name, supply_name);
		if (size >= REG_STR_SIZE)
			goto overflow_err;

		regulator->supply_name = kstrdup(buf, GFP_KERNEL);
		if (regulator->supply_name == NULL)
			goto overflow_err;

		err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
					buf);
		if (err) {
			rdev_warn(rdev, "could not add device link %s err %d\n",
				  dev->kobj.name, err);
			/* non-fatal */
		}
	} else {
		regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
		if (regulator->supply_name == NULL)
			goto overflow_err;
	}

	regulator->debugfs = debugfs_create_dir(regulator->supply_name,
						rdev->debugfs);
	if (!regulator->debugfs) {
		rdev_warn(rdev, "Failed to create debugfs directory\n");
	} else {
		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
				   &regulator->uA_load);
		debugfs_create_u32("min_uV", 0444, regulator->debugfs,
				   &regulator->min_uV);
		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
				   &regulator->max_uV);
	}

	/*
	 * Check now if the regulator is an always on regulator - if
	 * it is then we don't need to do nearly so much work for
	 * enable/disable calls.
	 */
	if (!_regulator_can_change_status(rdev) &&
	    _regulator_is_enabled(rdev))
		regulator->always_on = true;

	mutex_unlock(&rdev->mutex);
	return regulator;
overflow_err:
	list_del(&regulator->list);
	kfree(regulator);
	mutex_unlock(&rdev->mutex);
	return NULL;
}

/*
 * Return the regulator's enable ramp time in microseconds, preferring
 * the constraint value, then the driver op, then the static descriptor.
 */
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
	if (rdev->constraints && rdev->constraints->enable_time)
		return rdev->constraints->enable_time;
	if (!rdev->desc->ops->enable_time)
		return rdev->desc->enable_time;
	return rdev->desc->ops->enable_time(rdev);
}

/* look up a (dev, supply) alias registered earlier, or NULL */
static struct regulator_supply_alias *regulator_find_supply_alias(
		struct device *dev, const char *supply)
{
	struct regulator_supply_alias *map;

	list_for_each_entry(map, &regulator_supply_alias_list, list)
		if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
			return map;

	return NULL;
}

/* redirect a (dev, supply) lookup pair through any registered alias */
static void regulator_supply_alias(struct device **dev, const char **supply)
{
	struct regulator_supply_alias *map;

	map = regulator_find_supply_alias(*dev, *supply);
	if (map) {
		dev_dbg(*dev, "Mapping supply %s to %s,%s\n",
				*supply, map->alias_supply,
				dev_name(map->alias_dev));
		*dev = map->alias_dev;
		*supply = map->alias_supply;
	}
}

/*
 * Find the regulator_dev backing @supply for @dev: first via the device
 * tree, then by regulator name, then via the platform supply map.
 * On failure *ret is set to a suitable errno (-EPROBE_DEFER when the DT
 * node exists but its regulator has not registered yet).
 */
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
						  const char *supply,
						  int *ret)
{
	struct regulator_dev *r;
	struct device_node *node;
	struct regulator_map *map;
	const char *devname = NULL;

	regulator_supply_alias(&dev, &supply);

	/* first do a dt based lookup */
	if (dev && dev->of_node) {
		node = of_get_regulator(dev, supply);
		if (node) {
			list_for_each_entry(r, &regulator_list, list)
				if (r->dev.parent &&
					node == r->dev.of_node)
					return r;
			*ret = -EPROBE_DEFER;
			return NULL;
		} else {
			/*
			 * If we couldn't even get the node then it's
			 * not just that the device didn't register
			 * yet, there's no node and we'll never
			 * succeed.
			 */
			*ret = -ENODEV;
		}
	}

	/* if not found, try doing it non-dt way */
	if (dev)
		devname = dev_name(dev);

	list_for_each_entry(r, &regulator_list, list)
		if (strcmp(rdev_get_name(r), supply) == 0)
			return r;

	list_for_each_entry(map, &regulator_map_list, list) {
		/* If the mapping has a device set up it must match */
		if (map->dev_name &&
		    (!devname || strcmp(map->dev_name, devname)))
			continue;

		if (strcmp(map->supply, supply) == 0)
			return map->regulator;
	}

	return NULL;
}

/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
					bool exclusive, bool allow_dummy)
{
	struct regulator_dev *rdev;
	struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
	const char *devname = NULL;
	int ret;

	if (id == NULL) {
		pr_err("get() with no identifier\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev)
		devname = dev_name(dev);

	/* with full constraints a missing supply is final, not deferred */
	if (have_full_constraints())
		ret = -ENODEV;
	else
		ret = -EPROBE_DEFER;

	mutex_lock(&regulator_list_mutex);

	rdev = regulator_dev_lookup(dev, id, &ret);
	if (rdev)
		goto found;

	regulator = ERR_PTR(ret);

	/*
	 * If we have return value from dev_lookup fail, we do not expect to
	 * succeed, so, quit with appropriate error value
	 */
	if (ret && ret != -ENODEV)
		goto out;

	if (!devname)
		devname = "deviceless";

	/*
	 * Assume that a regulator is physically present and enabled
	 * even if it isn't hooked up and just provide a dummy.
	 */
	if (have_full_constraints() && allow_dummy) {
		pr_warn("%s supply %s not found, using dummy regulator\n",
			devname, id);

		rdev = dummy_regulator_rdev;
		goto found;
	/* Don't log an error when called from regulator_get_optional() */
	} else if (!have_full_constraints() || exclusive) {
		dev_warn(dev, "dummy supplies not allowed\n");
	}

	mutex_unlock(&regulator_list_mutex);
	return regulator;

found:
	/* an exclusively-held regulator cannot be shared */
	if (rdev->exclusive) {
		regulator = ERR_PTR(-EPERM);
		goto out;
	}

	if (exclusive && rdev->open_count) {
		regulator = ERR_PTR(-EBUSY);
		goto out;
	}

	if (!try_module_get(rdev->owner))
		goto out;

	regulator = create_regulator(rdev, dev, id);
	if (regulator == NULL) {
		regulator = ERR_PTR(-ENOMEM);
		module_put(rdev->owner);
		goto out;
	}

	rdev->open_count++;
	if (exclusive) {
		rdev->exclusive = 1;

		/* seed the use count from the current hardware state */
		ret = _regulator_is_enabled(rdev);
		if (ret > 0)
			rdev->use_count = 1;
		else
			rdev->use_count = 0;
	}

out:
	mutex_unlock(&regulator_list_mutex);

	return regulator;
}

/**
 * regulator_get - lookup and obtain a reference to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
 */
struct regulator *regulator_get(struct device *dev, const char *id)
{
	return _regulator_get(dev, id, false, true);
}
EXPORT_SYMBOL_GPL(regulator_get);

/**
 * regulator_get_exclusive - obtain exclusive access to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.  Other consumers will be
 * unable to obtain this regulator while this reference is held and the
 * use count for the regulator will be initialised to reflect the current
 * state of the regulator.
 *
 * This is intended for use by consumers which cannot tolerate shared
 * use of the regulator such as those which need to force the
 * regulator off for correct operation of the hardware they are
 * controlling.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
 */
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
	return _regulator_get(dev, id, true, false);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);

/**
 * regulator_get_optional - obtain optional access to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.
 *
 * This is intended for use by consumers for devices which can have
 * some supplies unconnected in normal use, such as some MMC devices.
 * It can allow the regulator core to provide stub supplies for other
 * supplies requested using normal regulator_get() calls without
 * disrupting the operation of drivers that can handle absent
 * supplies.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
 */
struct regulator *regulator_get_optional(struct device *dev, const char *id)
{
	return _regulator_get(dev, id, false, false);
}
EXPORT_SYMBOL_GPL(regulator_get_optional);

/* Locks held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
	struct regulator_dev *rdev;

	if (regulator == NULL || IS_ERR(regulator))
		return;

	rdev = regulator->rdev;

	debugfs_remove_recursive(regulator->debugfs);

	/* remove any sysfs entries */
	if (regulator->dev)
		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
	kfree(regulator->supply_name);
	list_del(&regulator->list);
	kfree(regulator);

	rdev->open_count--;
	rdev->exclusive = 0;

	module_put(rdev->owner);
}

/**
 * regulator_put - "free" the regulator source
 * @regulator: regulator source
 *
 * Note: drivers must ensure that all regulator_enable calls made on this
 * regulator source are balanced by regulator_disable calls prior to calling
 * this function.
 */
void regulator_put(struct regulator *regulator)
{
	mutex_lock(&regulator_list_mutex);
	_regulator_put(regulator);
	mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_put);

/**
 * regulator_register_supply_alias - Provide device alias for supply lookup
 *
 * @dev: device that will be given as the regulator "consumer"
 * @id: Supply name or regulator ID
 * @alias_dev: device that should be used to lookup the supply
 * @alias_id: Supply name or regulator ID that should be used to lookup the
 * supply
 *
 * All lookups for id on dev will instead be conducted for alias_id on
 * alias_dev.
 */
int regulator_register_supply_alias(struct device *dev, const char *id,
				    struct device *alias_dev,
				    const char *alias_id)
{
	struct regulator_supply_alias *map;

	/* refuse to register the same (dev, id) pair twice */
	map = regulator_find_supply_alias(dev, id);
	if (map)
		return -EEXIST;

	map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->src_dev = dev;
	map->src_supply = id;
	map->alias_dev = alias_dev;
	map->alias_supply = alias_id;

	list_add(&map->list, &regulator_supply_alias_list);

	pr_info("Adding alias for supply %s,%s -> %s,%s\n",
		id, dev_name(dev), alias_id, dev_name(alias_dev));

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_register_supply_alias);

/**
 * regulator_unregister_supply_alias - Remove device alias
 *
 * @dev: device that will be given as the regulator "consumer"
 * @id: Supply name or regulator ID
 *
 * Remove a lookup alias if one exists for id on dev.
 */
void regulator_unregister_supply_alias(struct device *dev, const char *id)
{
	struct regulator_supply_alias *map;

	map = regulator_find_supply_alias(dev, id);
	if (map) {
		list_del(&map->list);
		kfree(map);
	}
}
EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias);

/**
 * regulator_bulk_register_supply_alias - register multiple aliases
 *
 * @dev: device that will be given as the regulator "consumer"
 * @id: List of supply names or regulator IDs
 * @alias_dev: device that should be used to lookup the supply
 * @alias_id: List of supply names or regulator IDs that should be used to
 * lookup the supply
 * @num_id: Number of aliases to register
 *
 * @return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to register several supply
 * aliases in one operation.  If any of the aliases cannot be
 * registered any aliases that were registered will be removed
 * before returning to the caller.
 */
int regulator_bulk_register_supply_alias(struct device *dev,
					 const char *const *id,
					 struct device *alias_dev,
					 const char *const *alias_id,
					 int num_id)
{
	int i;
	int ret;

	for (i = 0; i < num_id; ++i) {
		ret = regulator_register_supply_alias(dev, id[i], alias_dev,
						      alias_id[i]);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	/* roll back the aliases registered so far */
	dev_err(dev,
		"Failed to create supply alias %s,%s -> %s,%s\n",
		id[i], dev_name(dev), alias_id[i], dev_name(alias_dev));

	while (--i >= 0)
		regulator_unregister_supply_alias(dev, id[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias);

/**
 * regulator_bulk_unregister_supply_alias - unregister multiple aliases
 *
 * @dev: device that will be given as the regulator "consumer"
 * @id: List of supply names or regulator IDs
 * @num_id: Number of aliases to unregister
 *
 * This helper function allows drivers to unregister several supply
 * aliases in one operation.
 */
void regulator_bulk_unregister_supply_alias(struct device *dev,
					    const char *const *id,
					    int num_id)
{
	int i;

	for (i = 0; i < num_id; ++i)
		regulator_unregister_supply_alias(dev, id[i]);
}
EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);


/* Manage enable GPIO list.  Same GPIO pin can be shared among regulators */
static int regulator_ena_gpio_request(struct regulator_dev *rdev,
				const struct regulator_config *config)
{
	struct regulator_enable_gpio *pin;
	struct gpio_desc *gpiod;
	int ret;

	gpiod = gpio_to_desc(config->ena_gpio);

	/* reuse an existing descriptor entry when the pin is shared */
	list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
		if (pin->gpiod == gpiod) {
			rdev_dbg(rdev, "GPIO %d is already used\n",
				config->ena_gpio);
			goto update_ena_gpio_to_rdev;
		}
	}

	ret = gpio_request_one(config->ena_gpio,
				GPIOF_DIR_OUT | config->ena_gpio_flags,
				rdev_get_name(rdev));
	if (ret)
		return ret;

	pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
	if (pin == NULL) {
		gpio_free(config->ena_gpio);
		return -ENOMEM;
	}

	pin->gpiod = gpiod;
	pin->ena_gpio_invert = config->ena_gpio_invert;
	list_add(&pin->list, &regulator_ena_gpio_list);

update_ena_gpio_to_rdev:
	pin->request_count++;
	rdev->ena_pin = pin;
	return 0;
}

static void regulator_ena_gpio_free(struct regulator_dev *rdev)
{
	struct regulator_enable_gpio *pin, *n;

	if (!rdev->ena_pin)
		return;

	/* Free the GPIO only in case of no use */
	list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
		if (pin->gpiod == rdev->ena_pin->gpiod) {
			if (pin->request_count <= 1) {
				pin->request_count = 0;
				gpiod_put(pin->gpiod);
				list_del(&pin->list);
				kfree(pin);
				/* return immediately: pin is freed and must
				 * not be dereferenced again */
				rdev->ena_pin = NULL;
				return;
			} else {
				pin->request_count--;
			}
		}
	}
}

/**
 * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
 * @rdev: regulator_dev structure
 * @enable: enable GPIO at initial use?
 *
 * GPIO is enabled in case of initial use. (enable_count is 0)
 * GPIO is disabled when it is not shared any more.
(enable_count <= 1) */ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) { struct regulator_enable_gpio *pin = rdev->ena_pin; if (!pin) return -EINVAL; if (enable) { /* Enable GPIO at initial use */ if (pin->enable_count == 0) gpiod_set_value_cansleep(pin->gpiod, !pin->ena_gpio_invert); pin->enable_count++; } else { if (pin->enable_count > 1) { pin->enable_count--; return 0; } /* Disable GPIO if not used */ if (pin->enable_count <= 1) { gpiod_set_value_cansleep(pin->gpiod, pin->ena_gpio_invert); pin->enable_count = 0; } } return 0; } /** * _regulator_enable_delay - a delay helper function * @delay: time to delay in microseconds * * Delay for the requested amount of time as per the guidelines in: * * Documentation/timers/timers-howto.txt * * The assumption here is that regulators will never be enabled in * atomic context and therefore sleeping functions can be used. */ static void _regulator_enable_delay(unsigned int delay) { unsigned int ms = delay / 1000; unsigned int us = delay % 1000; if (ms > 0) { /* * For small enough values, handle super-millisecond * delays in the usleep_range() call below. */ if (ms < 20) us += ms * 1000; else msleep(ms); } /* * Give the scheduler some room to coalesce with any other * wakeup sources. For delays shorter than 10 us, don't even * bother setting up high-resolution timers and just busy- * loop. */ if (us >= 10) usleep_range(us, us + 100); else udelay(us); } static int _regulator_do_enable(struct regulator_dev *rdev) { int ret, delay; /* Query before enabling in case configuration dependent. */ ret = _regulator_get_enable_time(rdev); if (ret >= 0) { delay = ret; } else { rdev_warn(rdev, "enable_time() failed: %d\n", ret); delay = 0; } trace_regulator_enable(rdev_get_name(rdev)); if (rdev->desc->off_on_delay) { /* if needed, keep a distance of off_on_delay from last time * this regulator was disabled. 
*/ unsigned long start_jiffy = jiffies; unsigned long intended, max_delay, remaining; max_delay = usecs_to_jiffies(rdev->desc->off_on_delay); intended = rdev->last_off_jiffy + max_delay; if (time_before(start_jiffy, intended)) { /* calc remaining jiffies to deal with one-time * timer wrapping. * in case of multiple timer wrapping, either it can be * detected by out-of-range remaining, or it cannot be * detected and we gets a panelty of * _regulator_enable_delay(). */ remaining = intended - start_jiffy; if (remaining <= max_delay) _regulator_enable_delay( jiffies_to_usecs(remaining)); } } if (rdev->ena_pin) { ret = regulator_ena_gpio_ctrl(rdev, true); if (ret < 0) return ret; rdev->ena_gpio_state = 1; } else if (rdev->desc->ops->enable) { ret = rdev->desc->ops->enable(rdev); if (ret < 0) return ret; } else { return -EINVAL; } /* Allow the regulator to ramp; it would be useful to extend * this for bulk operations so that the regulators can ramp * together. */ trace_regulator_enable_delay(rdev_get_name(rdev)); _regulator_enable_delay(delay); trace_regulator_enable_complete(rdev_get_name(rdev)); return 0; } /* locks held by regulator_enable() */ static int _regulator_enable(struct regulator_dev *rdev) { int ret; /* check voltage and requested load before enabling */ if (rdev->constraints && (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) drms_uA_update(rdev); if (rdev->use_count == 0) { /* The regulator may on if it's not switchable or left on */ ret = _regulator_is_enabled(rdev); if (ret == -EINVAL || ret == 0) { if (!_regulator_can_change_status(rdev)) return -EPERM; ret = _regulator_do_enable(rdev); if (ret < 0) return ret; } else if (ret < 0) { rdev_err(rdev, "is_enabled() failed: %d\n", ret); return ret; } /* Fallthrough on positive return values - already enabled */ } rdev->use_count++; return 0; } /** * regulator_enable - enable regulator output * @regulator: regulator source * * Request that the regulator be enabled with the regulator output at * 
 * the predefined voltage or current value. Calls to regulator_enable()
 * must be balanced with calls to regulator_disable().
 *
 * NOTE: the output value can be set by other drivers, boot loader or may be
 * hardwired in the regulator.
 */
int regulator_enable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;

	/* always-on consumers never actually toggle the hardware */
	if (regulator->always_on)
		return 0;

	/* bring up our supply first so the input rail is valid */
	if (rdev->supply) {
		ret = regulator_enable(rdev->supply);
		if (ret != 0)
			return ret;
	}

	mutex_lock(&rdev->mutex);
	ret = _regulator_enable(rdev);
	mutex_unlock(&rdev->mutex);

	/* undo the supply enable on failure to keep counts balanced */
	if (ret != 0 && rdev->supply)
		regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_enable);

/* Physically turn the regulator off via the enable GPIO or the driver's
 * ->disable() op, and record the time for off_on_delay accounting.
 */
static int _regulator_do_disable(struct regulator_dev *rdev)
{
	int ret;

	trace_regulator_disable(rdev_get_name(rdev));

	if (rdev->ena_pin) {
		ret = regulator_ena_gpio_ctrl(rdev, false);
		if (ret < 0)
			return ret;
		rdev->ena_gpio_state = 0;

	} else if (rdev->desc->ops->disable) {
		ret = rdev->desc->ops->disable(rdev);
		if (ret != 0)
			return ret;
	}

	/* cares about last_off_jiffy only if off_on_delay is required by
	 * device.
	 */
	if (rdev->desc->off_on_delay)
		rdev->last_off_jiffy = jiffies;

	trace_regulator_disable_complete(rdev_get_name(rdev));

	return 0;
}

/* locks held by regulator_disable() */
static int _regulator_disable(struct regulator_dev *rdev)
{
	int ret = 0;

	if (WARN(rdev->use_count <= 0,
		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
		return -EIO;

	/* are we the last user and permitted to disable ? */
	if (rdev->use_count == 1 &&
	    (rdev->constraints && !rdev->constraints->always_on)) {

		/* we are last user */
		if (_regulator_can_change_status(rdev)) {
			ret = _regulator_do_disable(rdev);
			if (ret < 0) {
				rdev_err(rdev, "failed to disable\n");
				return ret;
			}
			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
					NULL);
		}

		rdev->use_count = 0;
	} else if (rdev->use_count > 1) {

		if (rdev->constraints &&
			(rdev->constraints->valid_ops_mask &
			REGULATOR_CHANGE_DRMS))
			drms_uA_update(rdev);

		rdev->use_count--;
	}

	return ret;
}

/**
 * regulator_disable - disable regulator output
 * @regulator: regulator source
 *
 * Disable the regulator output voltage or current.  Calls to
 * regulator_enable() must be balanced with calls to
 * regulator_disable().
 *
 * NOTE: this will only disable the regulator output if no other consumer
 * devices have it enabled, the regulator device supports disabling and
 * machine constraints permit this operation.
 */
int regulator_disable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;

	if (regulator->always_on)
		return 0;

	mutex_lock(&rdev->mutex);
	ret = _regulator_disable(rdev);
	mutex_unlock(&rdev->mutex);

	/* balance the supply enable taken in regulator_enable() */
	if (ret == 0 && rdev->supply)
		regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);

/* locks held by regulator_force_disable() */
static int _regulator_force_disable(struct regulator_dev *rdev)
{
	int ret = 0;

	ret = _regulator_do_disable(rdev);
	if (ret < 0) {
		rdev_err(rdev, "failed to force disable\n");
		return ret;
	}

	_notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
			REGULATOR_EVENT_DISABLE, NULL);

	return 0;
}

/**
 * regulator_force_disable - force disable regulator output
 * @regulator: regulator source
 *
 * Forcibly disable the regulator output voltage or current.
 * NOTE: this *will* disable the regulator output even if other consumer
 * devices have it enabled. This should be used for situations when device
 * damage will likely occur if the regulator is not disabled (e.g. over temp).
 */
int regulator_force_disable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	mutex_lock(&rdev->mutex);
	regulator->uA_load = 0;
	ret = _regulator_force_disable(regulator->rdev);
	mutex_unlock(&rdev->mutex);

	/* drop every outstanding supply reference held for this rdev */
	if (rdev->supply)
		while (rdev->open_count--)
			regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_force_disable);

/* Deferred-disable worker: performs the disables queued up by
 * regulator_disable_deferred().
 */
static void regulator_disable_work(struct work_struct *work)
{
	struct regulator_dev *rdev = container_of(work, struct regulator_dev,
						  disable_work.work);
	int count, i, ret;

	mutex_lock(&rdev->mutex);

	BUG_ON(!rdev->deferred_disables);

	count = rdev->deferred_disables;
	rdev->deferred_disables = 0;

	for (i = 0; i < count; i++) {
		ret = _regulator_disable(rdev);
		if (ret != 0)
			rdev_err(rdev, "Deferred disable failed: %d\n", ret);
	}

	mutex_unlock(&rdev->mutex);

	/* supply references are balanced outside the rdev mutex */
	if (rdev->supply) {
		for (i = 0; i < count; i++) {
			ret = regulator_disable(rdev->supply);
			if (ret != 0) {
				rdev_err(rdev,
					 "Supply disable failed: %d\n", ret);
			}
		}
	}
}

/**
 * regulator_disable_deferred - disable regulator output with delay
 * @regulator: regulator source
 * @ms: milliseconds until the regulator is disabled
 *
 * Execute regulator_disable() on the regulator after a delay.  This
 * is intended for use with devices that require some time to quiesce.
 *
 * NOTE: this will only disable the regulator output if no other consumer
 * devices have it enabled, the regulator device supports disabling and
 * machine constraints permit this operation.
 */
int regulator_disable_deferred(struct regulator *regulator, int ms)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	if (regulator->always_on)
		return 0;

	/* a zero delay degenerates to an immediate disable */
	if (!ms)
		return regulator_disable(regulator);

	mutex_lock(&rdev->mutex);
	rdev->deferred_disables++;
	mutex_unlock(&rdev->mutex);

	ret = queue_delayed_work(system_power_efficient_wq,
				 &rdev->disable_work,
				 msecs_to_jiffies(ms));
	if (ret < 0)
		return ret;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(regulator_disable_deferred);

static int _regulator_is_enabled(struct regulator_dev *rdev)
{
	/* A GPIO control always takes precedence */
	if (rdev->ena_pin)
		return rdev->ena_gpio_state;

	/* If we don't know then assume that the regulator is always on */
	if (!rdev->desc->ops->is_enabled)
		return 1;

	return rdev->desc->ops->is_enabled(rdev);
}

/**
 * regulator_is_enabled - is the regulator output enabled
 * @regulator: regulator source
 *
 * Returns positive if the regulator driver backing the source/client
 * has requested that the device be enabled, zero if it hasn't, else a
 * negative errno code.
 *
 * Note that the device backing this regulator handle can have multiple
 * users, so it might be enabled even if regulator_enable() was never
 * called for this particular source.
 */
int regulator_is_enabled(struct regulator *regulator)
{
	int ret;

	if (regulator->always_on)
		return 1;

	mutex_lock(&regulator->rdev->mutex);
	ret = _regulator_is_enabled(regulator->rdev);
	mutex_unlock(&regulator->rdev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_is_enabled);

/**
 * regulator_can_change_voltage - check if regulator can change voltage
 * @regulator: regulator source
 *
 * Returns positive if the regulator driver backing the source/client
 * can change its voltage, false otherwise. Useful for detecting fixed
 * or dummy regulators and disabling voltage change logic in the client
 * driver.
 */
int regulator_can_change_voltage(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	if (rdev->constraints &&
	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		/* more than one selectable voltage step */
		if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
			return 1;

		/* continuous range with a non-degenerate constraint window */
		if (rdev->desc->continuous_voltage_range &&
		    rdev->constraints->min_uV && rdev->constraints->max_uV &&
		    rdev->constraints->min_uV != rdev->constraints->max_uV)
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_can_change_voltage);

/**
 * regulator_count_voltages - count regulator_list_voltage() selectors
 * @regulator: regulator source
 *
 * Returns number of selectors, or negative errno.  Selectors are
 * numbered starting at zero, and typically correspond to bitfields
 * in hardware registers.
 */
int regulator_count_voltages(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	if (rdev->desc->n_voltages)
		return rdev->desc->n_voltages;

	/* fall back to our supply's selector space */
	if (!rdev->supply)
		return -EINVAL;

	return regulator_count_voltages(rdev->supply);
}
EXPORT_SYMBOL_GPL(regulator_count_voltages);

/**
 * regulator_list_voltage - enumerate supported voltages
 * @regulator: regulator source
 * @selector: identify voltage to list
 * Context: can sleep
 *
 * Returns a voltage that can be passed to @regulator_set_voltage(),
 * zero if this selector code can't be used on this system, or a
 * negative errno.
 */
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;
	int ret;

	if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
		return rdev->desc->fixed_uV;

	if (ops->list_voltage) {
		if (selector >= rdev->desc->n_voltages)
			return -EINVAL;
		mutex_lock(&rdev->mutex);
		ret = ops->list_voltage(rdev, selector);
		mutex_unlock(&rdev->mutex);
	} else if (rdev->supply) {
		ret = regulator_list_voltage(rdev->supply, selector);
	} else {
		return -EINVAL;
	}

	/* voltages outside the machine constraints are reported as 0 */
	if (ret > 0) {
		if (ret < rdev->constraints->min_uV)
			ret = 0;
		else if (ret > rdev->constraints->max_uV)
			ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);

/**
 * regulator_get_regmap - get the regulator's register map
 * @regulator: regulator source
 *
 * Returns the register map for the given regulator, or an ERR_PTR value
 * if the regulator doesn't use regmap.
 */
struct regmap *regulator_get_regmap(struct regulator *regulator)
{
	struct regmap *map = regulator->rdev->regmap;

	return map ? map : ERR_PTR(-EOPNOTSUPP);
}

/**
 * regulator_get_hardware_vsel_register - get the HW voltage selector register
 * @regulator: regulator source
 * @vsel_reg: voltage selector register, output parameter
 * @vsel_mask: mask for voltage selector bitfield, output parameter
 *
 * Returns the hardware register offset and bitmask used for setting the
 * regulator voltage. This might be useful when configuring voltage-scaling
 * hardware or firmware that can make I2C requests behind the kernel's back,
 * for example.
 *
 * On success, the output parameters @vsel_reg and @vsel_mask are filled in
 * and 0 is returned, otherwise a negative errno is returned.
 */
int regulator_get_hardware_vsel_register(struct regulator *regulator,
					 unsigned *vsel_reg,
					 unsigned *vsel_mask)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;

	/* only meaningful for the generic regmap voltage-selector helper */
	if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
		return -EOPNOTSUPP;

	*vsel_reg = rdev->desc->vsel_reg;
	*vsel_mask = rdev->desc->vsel_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register);

/**
 * regulator_list_hardware_vsel - get the HW-specific register value for a selector
 * @regulator: regulator source
 * @selector: identify voltage to list
 *
 * Converts the selector to a hardware-specific voltage selector that can be
 * directly written to the regulator registers. The address of the voltage
 * register can be determined by calling @regulator_get_hardware_vsel_register.
 *
 * On error a negative errno is returned.
 */
int regulator_list_hardware_vsel(struct regulator *regulator,
				 unsigned selector)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;

	if (selector >= rdev->desc->n_voltages)
		return -EINVAL;
	if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
		return -EOPNOTSUPP;

	/* for the regmap helper the selector IS the register value */
	return selector;
}
EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);

/**
 * regulator_get_linear_step - return the voltage step size between VSEL values
 * @regulator: regulator source
 *
 * Returns the voltage step size between VSEL values for linear
 * regulators, or return 0 if the regulator isn't a linear regulator.
 */
unsigned int regulator_get_linear_step(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	return rdev->desc->uV_step;
}
EXPORT_SYMBOL_GPL(regulator_get_linear_step);

/**
 * regulator_is_supported_voltage - check if a voltage range can be supported
 *
 * @regulator: Regulator to check.
 * @min_uV: Minimum required voltage in uV.
 * @max_uV: Maximum required voltage in uV.
 *
 * Returns a boolean or a negative error code.
 */
int regulator_is_supported_voltage(struct regulator *regulator,
				   int min_uV, int max_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	int i, voltages, ret;

	/* If we can't change voltage check the current voltage */
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		ret = regulator_get_voltage(regulator);
		if (ret >= 0)
			return min_uV <= ret && ret <= max_uV;
		else
			return ret;
	}

	/* Any voltage within constrains range is fine? */
	if (rdev->desc->continuous_voltage_range)
		return min_uV >= rdev->constraints->min_uV &&
		       max_uV <= rdev->constraints->max_uV;

	ret = regulator_count_voltages(regulator);
	if (ret < 0)
		return ret;
	voltages = ret;

	for (i = 0; i < voltages; i++) {
		ret = regulator_list_voltage(regulator, i);

		if (ret >= min_uV && ret <= max_uV)
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);

/* Invoke the driver's ->set_voltage() op wrapped in the
 * PRE/ABORT voltage-change notifier protocol.
 */
static int _regulator_call_set_voltage(struct regulator_dev *rdev,
				       int min_uV, int max_uV,
				       unsigned *selector)
{
	struct pre_voltage_change_data data;
	int ret;

	data.old_uV = _regulator_get_voltage(rdev);
	data.min_uV = min_uV;
	data.max_uV = max_uV;
	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
				   &data);
	if (ret & NOTIFY_STOP_MASK)
		return -EINVAL;

	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector);
	if (ret >= 0)
		return ret;

	/* tell listeners the change was aborted */
	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
			     (void *)data.old_uV);

	return ret;
}

/* As _regulator_call_set_voltage() but for ->set_voltage_sel(). */
static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
					   int uV, unsigned selector)
{
	struct pre_voltage_change_data data;
	int ret;

	data.old_uV = _regulator_get_voltage(rdev);
	data.min_uV = uV;
	data.max_uV = uV;
	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
				   &data);
	if (ret & NOTIFY_STOP_MASK)
		return -EINVAL;

	ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
	if (ret >= 0)
		return ret;

	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
			     (void *)data.old_uV);

	return ret;
}

/* Core voltage-setting path: map the request to a selector (or raw
 * voltage), program the hardware, then wait out any ramp delay.
 * Caller holds rdev->mutex.
 */
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
				     int min_uV, int max_uV)
{
	int ret;
	int delay = 0;
	int best_val = 0;
	unsigned int selector;
	int old_selector = -1;

	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);

	min_uV += rdev->constraints->uV_offset;
	max_uV += rdev->constraints->uV_offset;

	/*
	 * If we can't obtain the old selector there is not enough
	 * info to call set_voltage_time_sel().
	 */
	if (_regulator_is_enabled(rdev) &&
	    rdev->desc->ops->set_voltage_time_sel &&
	    rdev->desc->ops->get_voltage_sel) {
		old_selector = rdev->desc->ops->get_voltage_sel(rdev);
		if (old_selector < 0)
			return old_selector;
	}

	if (rdev->desc->ops->set_voltage) {
		ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
						  &selector);

		if (ret >= 0) {
			if (rdev->desc->ops->list_voltage)
				best_val = rdev->desc->ops->list_voltage(rdev,
									 selector);
			else
				best_val = _regulator_get_voltage(rdev);
		}

	} else if (rdev->desc->ops->set_voltage_sel) {
		if (rdev->desc->ops->map_voltage) {
			ret = rdev->desc->ops->map_voltage(rdev, min_uV,
							   max_uV);
		} else {
			/* pick the matching generic mapper for the
			 * driver's list_voltage implementation */
			if (rdev->desc->ops->list_voltage ==
			    regulator_list_voltage_linear)
				ret = regulator_map_voltage_linear(rdev,
								min_uV, max_uV);
			else if (rdev->desc->ops->list_voltage ==
				 regulator_list_voltage_linear_range)
				ret = regulator_map_voltage_linear_range(rdev,
								min_uV, max_uV);
			else
				ret = regulator_map_voltage_iterate(rdev,
								min_uV, max_uV);
		}

		if (ret >= 0) {
			best_val = rdev->desc->ops->list_voltage(rdev, ret);
			if (min_uV <= best_val && max_uV >= best_val) {
				selector = ret;
				if (old_selector == selector)
					ret = 0;
				else
					ret = _regulator_call_set_voltage_sel(
						rdev, best_val, selector);
			} else {
				ret = -EINVAL;
			}
		}
	} else {
		ret = -EINVAL;
	}

	/* Call set_voltage_time_sel if successfully obtained old_selector */
	if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
		&& old_selector != selector) {

		delay = rdev->desc->ops->set_voltage_time_sel(rdev,
						old_selector, selector);
		if (delay < 0) {
			rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n",
				  delay);
			delay = 0;
		}

		/* Insert any necessary delays */
		if (delay >= 1000) {
			mdelay(delay / 1000);
			udelay(delay % 1000);
		} else if (delay) {
			udelay(delay);
		}
	}

	if (ret == 0 && best_val >= 0) {
		unsigned long data = best_val;

		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
				     (void *)data);
	}

	trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);

	return ret;
}

/**
 * regulator_set_voltage - set regulator output voltage
 * @regulator: regulator source
 * @min_uV: Minimum required voltage in uV
 * @max_uV: Maximum acceptable voltage in uV
 *
 * Sets a voltage regulator to the desired output voltage. This can be set
 * during any regulator state. IOW, regulator can be disabled or enabled.
 *
 * If the regulator is enabled then the voltage will change to the new value
 * immediately otherwise if the regulator is disabled the regulator will
 * output at the new voltage when enabled.
 *
 * NOTE: If the regulator is shared between several devices then the lowest
 * request voltage that meets the system constraints will be used.
 * Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;
	int old_min_uV, old_max_uV;
	int current_uV;

	mutex_lock(&rdev->mutex);

	/* If we're setting the same range as last time the change
	 * should be a noop (some cpufreq implementations use the same
	 * voltage for multiple frequencies, for example).
	 */
	if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
		goto out;

	/* If we're trying to set a range that overlaps the current voltage,
	 * return successfully even though the regulator does not support
	 * changing the voltage.
	 */
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		current_uV = _regulator_get_voltage(rdev);
		if (min_uV <= current_uV && current_uV <= max_uV) {
			regulator->min_uV = min_uV;
			regulator->max_uV = max_uV;
			goto out;
		}
	}

	/* sanity check */
	if (!rdev->desc->ops->set_voltage &&
	    !rdev->desc->ops->set_voltage_sel) {
		ret = -EINVAL;
		goto out;
	}

	/* constraints check */
	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	/* restore original values in case of error */
	old_min_uV = regulator->min_uV;
	old_max_uV = regulator->max_uV;
	regulator->min_uV = min_uV;
	regulator->max_uV = max_uV;

	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out2;

	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
	if (ret < 0)
		goto out2;

out:
	mutex_unlock(&rdev->mutex);
	return ret;
out2:
	/* hardware update failed: roll back the cached request */
	regulator->min_uV = old_min_uV;
	regulator->max_uV = old_max_uV;
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);

/**
 * regulator_set_voltage_time - get raise/fall time
 * @regulator: regulator source
 * @old_uV: starting voltage in microvolts
 * @new_uV: target voltage in microvolts
 *
 * Provided with the starting and ending voltage, this function attempts to
 * calculate the time in microseconds required to rise or fall to this new
 * voltage.
 */
int regulator_set_voltage_time(struct regulator *regulator,
			       int old_uV, int new_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;
	int old_sel = -1;
	int new_sel = -1;
	int voltage;
	int i;

	/* Currently requires operations to do this */
	if (!ops->list_voltage || !ops->set_voltage_time_sel
	    || !rdev->desc->n_voltages)
		return -EINVAL;

	for (i = 0; i < rdev->desc->n_voltages; i++) {
		/* We only look for exact voltage matches here */
		voltage = regulator_list_voltage(regulator, i);
		if (voltage < 0)
			return -EINVAL;
		if (voltage == 0)
			continue;
		if (voltage == old_uV)
			old_sel = i;
		if (voltage == new_uV)
			new_sel = i;
	}

	if (old_sel < 0 || new_sel < 0)
		return -EINVAL;

	return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);

/**
 * regulator_set_voltage_time_sel - get raise/fall time
 * @rdev: regulator source device
 * @old_selector: selector for starting voltage
 * @new_selector: selector for target voltage
 *
 * Provided with the starting and target voltage selectors, this function
 * returns time in microseconds required to rise or fall to this new voltage
 *
 * Drivers providing ramp_delay in regulation_constraints can use this as their
 * set_voltage_time_sel() operation.
 */
int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
				   unsigned int old_selector,
				   unsigned int new_selector)
{
	unsigned int ramp_delay = 0;
	int old_volt, new_volt;

	/* machine constraints take precedence over the descriptor default */
	if (rdev->constraints->ramp_delay)
		ramp_delay = rdev->constraints->ramp_delay;
	else if (rdev->desc->ramp_delay)
		ramp_delay = rdev->desc->ramp_delay;

	if (ramp_delay == 0) {
		rdev_warn(rdev, "ramp_delay not set\n");
		return 0;
	}

	/* sanity check */
	if (!rdev->desc->ops->list_voltage)
		return -EINVAL;

	old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
	new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);

	return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);

/**
 * regulator_sync_voltage - re-apply last regulator output voltage
 * @regulator: regulator source
 *
 * Re-apply the last configured voltage.  This is intended to be used
 * where some external control source the consumer is cooperating with
 * has caused the configured voltage to change.
 */
int regulator_sync_voltage(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret, min_uV, max_uV;

	mutex_lock(&rdev->mutex);

	if (!rdev->desc->ops->set_voltage &&
	    !rdev->desc->ops->set_voltage_sel) {
		ret = -EINVAL;
		goto out;
	}

	/* This is only going to work if we've had a voltage configured. */
	if (!regulator->min_uV && !regulator->max_uV) {
		ret = -EINVAL;
		goto out;
	}

	min_uV = regulator->min_uV;
	max_uV = regulator->max_uV;

	/* This should be a paranoia check...
	 */
	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);

out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_sync_voltage);

/* Read the current output voltage in uV, preferring the most precise
 * mechanism the driver offers.  Caller holds rdev->mutex.
 */
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
	int sel, ret;

	if (rdev->desc->ops->get_voltage_sel) {
		sel = rdev->desc->ops->get_voltage_sel(rdev);
		if (sel < 0)
			return sel;
		ret = rdev->desc->ops->list_voltage(rdev, sel);
	} else if (rdev->desc->ops->get_voltage) {
		ret = rdev->desc->ops->get_voltage(rdev);
	} else if (rdev->desc->ops->list_voltage) {
		ret = rdev->desc->ops->list_voltage(rdev, 0);
	} else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
		ret = rdev->desc->fixed_uV;
	} else if (rdev->supply) {
		ret = regulator_get_voltage(rdev->supply);
	} else {
		return -EINVAL;
	}

	if (ret < 0)
		return ret;
	return ret - rdev->constraints->uV_offset;
}

/**
 * regulator_get_voltage - get regulator output voltage
 * @regulator: regulator source
 *
 * This returns the current regulator voltage in uV.
 *
 * NOTE: If the regulator is disabled it will return the voltage value. This
 * function should not be used to determine regulator state.
 */
int regulator_get_voltage(struct regulator *regulator)
{
	int ret;

	mutex_lock(&regulator->rdev->mutex);

	ret = _regulator_get_voltage(regulator->rdev);

	mutex_unlock(&regulator->rdev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_get_voltage);

/**
 * regulator_set_current_limit - set regulator output current limit
 * @regulator: regulator source
 * @min_uA: Minimum supported current in uA
 * @max_uA: Maximum supported current in uA
 *
 * Sets current sink to the desired output current. This can be set during
 * any regulator state. IOW, regulator can be disabled or enabled.
 *
 * If the regulator is enabled then the current will change to the new value
 * immediately otherwise if the regulator is disabled the regulator will
 * output at the new current when enabled.
 *
 * NOTE: Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_current_limit(struct regulator *regulator,
			       int min_uA, int max_uA)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->set_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	/* constraints check */
	ret = regulator_check_current_limit(rdev, &min_uA, &max_uA);
	if (ret < 0)
		goto out;

	ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_current_limit);

static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->get_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdev->desc->ops->get_current_limit(rdev);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}

/**
 * regulator_get_current_limit - get regulator output current
 * @regulator: regulator source
 *
 * This returns the current supplied by the specified current sink in uA.
 *
 * NOTE: If the regulator is disabled it will return the current value. This
 * function should not be used to determine regulator state.
 */
int regulator_get_current_limit(struct regulator *regulator)
{
	return _regulator_get_current_limit(regulator->rdev);
}
EXPORT_SYMBOL_GPL(regulator_get_current_limit);

/**
 * regulator_set_mode - set regulator operating mode
 * @regulator: regulator source
 * @mode: operating mode - one of the REGULATOR_MODE constants
 *
 * Set regulator operating mode to increase regulator efficiency or improve
 * regulation performance.
 *
 * NOTE: Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_mode(struct regulator *regulator, unsigned int mode)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;
	int regulator_curr_mode;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->set_mode) {
		ret = -EINVAL;
		goto out;
	}

	/* return if the same mode is requested */
	if (rdev->desc->ops->get_mode) {
		regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
		if (regulator_curr_mode == mode) {
			ret = 0;
			goto out;
		}
	}

	/* constraints check */
	ret = regulator_mode_constrain(rdev, &mode);
	if (ret < 0)
		goto out;

	ret = rdev->desc->ops->set_mode(rdev, mode);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_mode);

static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
{
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->get_mode) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdev->desc->ops->get_mode(rdev);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}

/**
 * regulator_get_mode - get regulator operating mode
 * @regulator: regulator source
 *
 * Get the current regulator operating mode.
 */
unsigned int regulator_get_mode(struct regulator *regulator)
{
	return _regulator_get_mode(regulator->rdev);
}
EXPORT_SYMBOL_GPL(regulator_get_mode);

/**
 * regulator_set_optimum_mode - set regulator optimum operating mode
 * @regulator: regulator source
 * @uA_load: load current
 *
 * Notifies the regulator core of a new device load. This is then used by
 * DRMS (if enabled by constraints) to set the most efficient regulator
 * operating mode for the new regulator loading.
 *
 * Consumer devices notify their supply regulator of the maximum power
 * they will require (can be taken from device datasheet in the power
 * consumption tables) when they change operational status and hence power
 * state.
 * Examples of operational state changes that can affect power
 * consumption are :-
 *
 *    o Device is opened / closed.
 *    o Device I/O is about to begin or has just finished.
 *    o Device is idling in between work.
 *
 * This information is also exported via sysfs to userspace.
 *
 * DRMS will sum the total requested load on the regulator and change
 * to the most efficient operating mode if platform constraints allow.
 *
 * Returns the new regulator mode or error.
 */
int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
{
	struct regulator_dev *rdev = regulator->rdev;
	struct regulator *consumer;
	int ret, output_uV, input_uV = 0, total_uA_load = 0;
	unsigned int mode;

	/* read the supply voltage before taking our own mutex */
	if (rdev->supply)
		input_uV = regulator_get_voltage(rdev->supply);

	mutex_lock(&rdev->mutex);

	/*
	 * first check to see if we can set modes at all, otherwise just
	 * tell the consumer everything is OK.
	 */
	regulator->uA_load = uA_load;
	ret = regulator_check_drms(rdev);
	if (ret < 0) {
		ret = 0;
		goto out;
	}

	if (!rdev->desc->ops->get_optimum_mode)
		goto out;

	/*
	 * we can actually do this so any errors are indicators of
	 * potential real failure.
	 */
	ret = -EINVAL;

	if (!rdev->desc->ops->set_mode)
		goto out;

	/* get output voltage */
	output_uV = _regulator_get_voltage(rdev);
	if (output_uV <= 0) {
		rdev_err(rdev, "invalid output voltage found\n");
		goto out;
	}

	/* No supply? Use constraint voltage */
	if (input_uV <= 0)
		input_uV = rdev->constraints->input_uV;
	if (input_uV <= 0) {
		rdev_err(rdev, "invalid input voltage found\n");
		goto out;
	}

	/* calc total requested load for this regulator */
	list_for_each_entry(consumer, &rdev->consumer_list, list)
		total_uA_load += consumer->uA_load;

	mode = rdev->desc->ops->get_optimum_mode(rdev,
						 input_uV, output_uV,
						 total_uA_load);
	ret = regulator_mode_constrain(rdev, &mode);
	if (ret < 0) {
		rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
			 total_uA_load, input_uV, output_uV);
		goto out;
	}

	ret = rdev->desc->ops->set_mode(rdev, mode);
	if (ret < 0) {
		rdev_err(rdev, "failed to set optimum mode %x\n", mode);
		goto out;
	}
	ret = mode;
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);

/**
 * regulator_allow_bypass - allow the regulator to go into bypass mode
 *
 * @regulator: Regulator to configure
 * @enable: enable or disable bypass mode
 *
 * Allow the regulator to go into bypass mode if all other consumers
 * for the regulator also enable bypass mode and the machine
 * constraints allow this.  Bypass mode means that the regulator is
 * simply passing the input directly to the output with no regulation.
 */
int regulator_allow_bypass(struct regulator *regulator, bool enable)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;

	if (!rdev->desc->ops->set_bypass)
		return 0;

	if (rdev->constraints &&
	    !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
		return 0;

	mutex_lock(&rdev->mutex);

	if (enable && !regulator->bypass) {
		rdev->bypass_count++;

		/* hardware bypass only once every open consumer agrees */
		if (rdev->bypass_count == rdev->open_count) {
			ret = rdev->desc->ops->set_bypass(rdev, enable);
			if (ret != 0)
				rdev->bypass_count--;
		}

	} else if (!enable && regulator->bypass) {
		rdev->bypass_count--;

		if (rdev->bypass_count != rdev->open_count) {
			ret = rdev->desc->ops->set_bypass(rdev, enable);
			if (ret != 0)
				rdev->bypass_count++;
		}
	}

	if (ret == 0)
		regulator->bypass = enable;

	mutex_unlock(&rdev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_allow_bypass);

/**
 * regulator_register_notifier - register regulator event notifier
 * @regulator: regulator source
 * @nb: notifier block
 *
 * Register notifier block to receive regulator events.
 */
int regulator_register_notifier(struct regulator *regulator,
				struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&regulator->rdev->notifier,
						nb);
}
EXPORT_SYMBOL_GPL(regulator_register_notifier);

/**
 * regulator_unregister_notifier - unregister regulator event notifier
 * @regulator: regulator source
 * @nb: notifier block
 *
 * Unregister regulator event notifier block.
 */
int regulator_unregister_notifier(struct regulator *regulator,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&regulator->rdev->notifier,
						  nb);
}
EXPORT_SYMBOL_GPL(regulator_unregister_notifier);

/* notify regulator consumers and downstream regulator consumers.
 * Note mutex must be held by caller.
*/ static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ return blocking_notifier_call_chain(&rdev->notifier, event, data); } /** * regulator_bulk_get - get multiple regulator consumers * * @dev: Device to supply * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. * * @return 0 on success, an errno on failure. * * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed * before returning to the caller. */ int regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].consumer = NULL; for (i = 0; i < num_consumers; i++) { consumers[i].consumer = regulator_get(dev, consumers[i].supply); if (IS_ERR(consumers[i].consumer)) { ret = PTR_ERR(consumers[i].consumer); dev_err(dev, "Failed to get supply '%s': %d\n", consumers[i].supply, ret); consumers[i].consumer = NULL; goto err; } } return 0; err: while (--i >= 0) regulator_put(consumers[i].consumer); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_get); static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) { struct regulator_bulk_data *bulk = data; bulk->ret = regulator_enable(bulk->consumer); } /** * regulator_bulk_enable - enable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to enable multiple regulator * clients in a single API call. If any consumers cannot be enabled * then any others that were enabled will be disabled again prior to * return. 
*/ int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { ASYNC_DOMAIN_EXCLUSIVE(async_domain); int i; int ret = 0; for (i = 0; i < num_consumers; i++) { if (consumers[i].consumer->always_on) consumers[i].ret = 0; else async_schedule_domain(regulator_bulk_enable_async, &consumers[i], &async_domain); } async_synchronize_full_domain(&async_domain); /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto err; } } return 0; err: for (i = 0; i < num_consumers; i++) { if (consumers[i].ret < 0) pr_err("Failed to enable %s: %d\n", consumers[i].supply, consumers[i].ret); else regulator_disable(consumers[i].consumer); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_enable); /** * regulator_bulk_disable - disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to disable multiple regulator * clients in a single API call. If any consumers cannot be disabled * then any others that were disabled will be enabled again prior to * return. */ int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret, r; for (i = num_consumers - 1; i >= 0; --i) { ret = regulator_disable(consumers[i].consumer); if (ret != 0) goto err; } return 0; err: pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret); for (++i; i < num_consumers; ++i) { r = regulator_enable(consumers[i].consumer); if (r != 0) pr_err("Failed to reename %s: %d\n", consumers[i].supply, r); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_disable); /** * regulator_bulk_force_disable - force disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. 
 * @return         0 on success, an errno on failure
 *
 * This convenience API allows consumers to forcibly disable multiple regulator
 * clients in a single API call.
 * NOTE: This should be used for situations when device damage will
 * likely occur if the regulators are not disabled (e.g. over temp).
 * Although regulator_force_disable function call for some consumers can
 * return error numbers, the function is called for all consumers.
 */
int regulator_bulk_force_disable(int num_consumers,
				 struct regulator_bulk_data *consumers)
{
	int i;
	int ret;

	/* Attempt every consumer regardless of individual failures. */
	for (i = 0; i < num_consumers; i++)
		consumers[i].ret =
			    regulator_force_disable(consumers[i].consumer);

	/* Return the first recorded error, if any. */
	for (i = 0; i < num_consumers; i++) {
		if (consumers[i].ret != 0) {
			ret = consumers[i].ret;
			goto out;
		}
	}

	return 0;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);

/**
 * regulator_bulk_free - free multiple regulator consumers
 *
 * @num_consumers: Number of consumers
 * @consumers:     Consumer data; clients are stored here.
 *
 * This convenience API allows consumers to free multiple regulator
 * clients in a single API call.
 */
void regulator_bulk_free(int num_consumers,
			 struct regulator_bulk_data *consumers)
{
	int i;

	for (i = 0; i < num_consumers; i++) {
		regulator_put(consumers[i].consumer);
		consumers[i].consumer = NULL;
	}
}
EXPORT_SYMBOL_GPL(regulator_bulk_free);

/**
 * regulator_notifier_call_chain - call regulator event notifier
 * @rdev: regulator source
 * @event: notifier block
 * @data: callback-specific data.
 *
 * Called by regulator drivers to notify clients a regulator event has
 * occurred. We also notify regulator clients downstream.
 * Note lock must be held by caller.
*/
int regulator_notifier_call_chain(struct regulator_dev *rdev,
				  unsigned long event, void *data)
{
	_notifier_call_chain(rdev, event, data);
	return NOTIFY_DONE;
}
EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);

/**
 * regulator_mode_to_status - convert a regulator mode into a status
 *
 * @mode: Mode to convert
 *
 * Convert a regulator mode into a status.
 */
int regulator_mode_to_status(unsigned int mode)
{
	switch (mode) {
	case REGULATOR_MODE_FAST:
		return REGULATOR_STATUS_FAST;
	case REGULATOR_MODE_NORMAL:
		return REGULATOR_STATUS_NORMAL;
	case REGULATOR_MODE_IDLE:
		return REGULATOR_STATUS_IDLE;
	case REGULATOR_MODE_STANDBY:
		return REGULATOR_STATUS_STANDBY;
	default:
		return REGULATOR_STATUS_UNDEFINED;
	}
}
EXPORT_SYMBOL_GPL(regulator_mode_to_status);

/*
 * To avoid cluttering sysfs (and memory) with useless state, only
 * create attributes that can be meaningfully displayed.
 */
static int add_regulator_attributes(struct regulator_dev *rdev)
{
	struct device *dev = &rdev->dev;
	const struct regulator_ops *ops = rdev->desc->ops;
	int status = 0;

	/* some attributes need specific methods to be displayed */
	if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
	    (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
	    (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
	    (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
		status = device_create_file(dev, &dev_attr_microvolts);
		if (status < 0)
			return status;
	}
	if (ops->get_current_limit) {
		status = device_create_file(dev, &dev_attr_microamps);
		if (status < 0)
			return status;
	}
	if (ops->get_mode) {
		status = device_create_file(dev, &dev_attr_opmode);
		if (status < 0)
			return status;
	}
	if (rdev->ena_pin || ops->is_enabled) {
		status = device_create_file(dev, &dev_attr_state);
		if (status < 0)
			return status;
	}
	if (ops->get_status) {
		status = device_create_file(dev, &dev_attr_status);
		if (status < 0)
			return status;
	}
	if (ops->get_bypass) {
		status = device_create_file(dev, &dev_attr_bypass);
		if (status < 0)
			return status;
	}

	/* some attributes are type-specific */
	if (rdev->desc->type == REGULATOR_CURRENT) {
		status = device_create_file(dev, &dev_attr_requested_microamps);
		if (status < 0)
			return status;
	}

	/* all the other attributes exist to support constraints;
	 * don't show them if there are no constraints, or if the
	 * relevant supporting methods are missing.
	 */
	if (!rdev->constraints)
		return status;

	/* constraints need specific supporting methods */
	if (ops->set_voltage || ops->set_voltage_sel) {
		status = device_create_file(dev, &dev_attr_min_microvolts);
		if (status < 0)
			return status;
		status = device_create_file(dev, &dev_attr_max_microvolts);
		if (status < 0)
			return status;
	}
	if (ops->set_current_limit) {
		status = device_create_file(dev, &dev_attr_min_microamps);
		if (status < 0)
			return status;
		status = device_create_file(dev, &dev_attr_max_microamps);
		if (status < 0)
			return status;
	}

	/* Suspend-state attributes are shown whenever constraints exist. */
	status = device_create_file(dev, &dev_attr_suspend_standby_state);
	if (status < 0)
		return status;
	status = device_create_file(dev, &dev_attr_suspend_mem_state);
	if (status < 0)
		return status;
	status = device_create_file(dev, &dev_attr_suspend_disk_state);
	if (status < 0)
		return status;

	if (ops->set_suspend_voltage) {
		status = device_create_file(dev,
				&dev_attr_suspend_standby_microvolts);
		if (status < 0)
			return status;
		status = device_create_file(dev,
				&dev_attr_suspend_mem_microvolts);
		if (status < 0)
			return status;
		status = device_create_file(dev,
				&dev_attr_suspend_disk_microvolts);
		if (status < 0)
			return status;
	}

	if (ops->set_suspend_mode) {
		status = device_create_file(dev,
				&dev_attr_suspend_standby_mode);
		if (status < 0)
			return status;
		status = device_create_file(dev,
				&dev_attr_suspend_mem_mode);
		if (status < 0)
			return status;
		status = device_create_file(dev,
				&dev_attr_suspend_disk_mode);
		if (status < 0)
			return status;
	}

	return status;
}

/* Expose per-regulator counters under debugfs; failure is non-fatal. */
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
	rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
	if (!rdev->debugfs) {
		rdev_warn(rdev, "Failed to create debugfs directory\n");
		return;
	}

	debugfs_create_u32("use_count", 0444, rdev->debugfs,
			   &rdev->use_count);
	debugfs_create_u32("open_count", 0444, rdev->debugfs,
			   &rdev->open_count);
	debugfs_create_u32("bypass_count", 0444, rdev->debugfs,
			   &rdev->bypass_count);
}

/**
 * regulator_register - register regulator
 * @regulator_desc: regulator to register
 * @config: runtime configuration for regulator
 *
 * Called by regulator drivers to register a regulator.
 * Returns a valid pointer to struct regulator_dev on success
 * or an ERR_PTR() on error.
 */
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
		   const struct regulator_config *config)
{
	const struct regulation_constraints *constraints = NULL;
	const struct regulator_init_data *init_data;
	static atomic_t regulator_no = ATOMIC_INIT(0);
	struct regulator_dev *rdev;
	struct device *dev;
	int ret, i;
	const char *supply = NULL;

	if (regulator_desc == NULL || config == NULL)
		return ERR_PTR(-EINVAL);

	dev = config->dev;
	WARN_ON(!dev);

	if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
		return ERR_PTR(-EINVAL);

	if (regulator_desc->type != REGULATOR_VOLTAGE &&
	    regulator_desc->type != REGULATOR_CURRENT)
		return ERR_PTR(-EINVAL);

	/* Only one of each should be implemented */
	WARN_ON(regulator_desc->ops->get_voltage &&
		regulator_desc->ops->get_voltage_sel);
	WARN_ON(regulator_desc->ops->set_voltage &&
		regulator_desc->ops->set_voltage_sel);

	/* If we're using selectors we must implement list_voltage. */
	if (regulator_desc->ops->get_voltage_sel &&
	    !regulator_desc->ops->list_voltage) {
		return ERR_PTR(-EINVAL);
	}
	if (regulator_desc->ops->set_voltage_sel &&
	    !regulator_desc->ops->list_voltage) {
		return ERR_PTR(-EINVAL);
	}

	rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
	if (rdev == NULL)
		return ERR_PTR(-ENOMEM);

	/* DT-provided init data takes precedence over config->init_data. */
	init_data = regulator_of_get_init_data(dev, regulator_desc,
					       &rdev->dev.of_node);
	if (!init_data) {
		init_data = config->init_data;
		rdev->dev.of_node = of_node_get(config->of_node);
	}

	mutex_lock(&regulator_list_mutex);

	mutex_init(&rdev->mutex);
	rdev->reg_data = config->driver_data;
	rdev->owner = regulator_desc->owner;
	rdev->desc = regulator_desc;
	/* Regmap lookup order: explicit config, own device, then parent. */
	if (config->regmap)
		rdev->regmap = config->regmap;
	else if (dev_get_regmap(dev, NULL))
		rdev->regmap = dev_get_regmap(dev, NULL);
	else if (dev->parent)
		rdev->regmap = dev_get_regmap(dev->parent, NULL);
	INIT_LIST_HEAD(&rdev->consumer_list);
	INIT_LIST_HEAD(&rdev->list);
	BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
	INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);

	/* preform any regulator specific init */
	if (init_data && init_data->regulator_init) {
		ret = init_data->regulator_init(rdev->reg_data);
		if (ret < 0)
			goto clean;
	}

	/* register with sysfs */
	rdev->dev.class = &regulator_class;
	rdev->dev.parent = dev;
	/* atomic_inc_return() - 1 yields 0-based "regulator.N" names. */
	dev_set_name(&rdev->dev, "regulator.%d",
		     atomic_inc_return(&regulator_no) - 1);
	ret = device_register(&rdev->dev);
	if (ret != 0) {
		put_device(&rdev->dev);
		goto clean;
	}

	dev_set_drvdata(&rdev->dev, rdev);

	if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
		ret = regulator_ena_gpio_request(rdev, config);
		if (ret != 0) {
			rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
				 config->ena_gpio, ret);
			goto wash;
		}

		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
			rdev->ena_gpio_state = 1;

		if (config->ena_gpio_invert)
			rdev->ena_gpio_state = !rdev->ena_gpio_state;
	}

	/* set regulator constraints */
	if (init_data)
		constraints = &init_data->constraints;

	ret = set_machine_constraints(rdev, constraints);
	if (ret < 0)
		goto scrub;

	/* add attributes supported by this regulator */
	ret = add_regulator_attributes(rdev);
	if (ret < 0)
		goto scrub;

	if (init_data && init_data->supply_regulator)
		supply = init_data->supply_regulator;
	else if (regulator_desc->supply_name)
		supply = regulator_desc->supply_name;

	if (supply) {
		struct regulator_dev *r;

		r = regulator_dev_lookup(dev, supply, &ret);

		if (ret == -ENODEV) {
			/*
			 * No supply was specified for this regulator and
			 * there will never be one.
			 */
			ret = 0;
			goto add_dev;
		} else if (!r) {
			dev_err(dev, "Failed to find supply %s\n", supply);
			ret = -EPROBE_DEFER;
			goto scrub;
		}

		ret = set_supply(rdev, r);
		if (ret < 0)
			goto scrub;

		/* Enable supply if rail is enabled */
		if (_regulator_is_enabled(rdev)) {
			ret = regulator_enable(rdev->supply);
			if (ret < 0)
				goto scrub;
		}
	}

add_dev:
	/* add consumers devices */
	if (init_data) {
		for (i = 0; i < init_data->num_consumer_supplies; i++) {
			ret = set_consumer_device_supply(rdev,
				init_data->consumer_supplies[i].dev_name,
				init_data->consumer_supplies[i].supply);
			if (ret < 0) {
				dev_err(dev, "Failed to set supply %s\n",
					init_data->consumer_supplies[i].supply);
				goto unset_supplies;
			}
		}
	}

	list_add(&rdev->list, &regulator_list);

	rdev_init_debugfs(rdev);
out:
	mutex_unlock(&regulator_list_mutex);
	return rdev;

	/*
	 * Error unwinding: later labels fall through to earlier ones so
	 * each label undoes exactly what succeeded before its goto site.
	 */
unset_supplies:
	unset_regulator_supplies(rdev);

scrub:
	if (rdev->supply)
		_regulator_put(rdev->supply);
	regulator_ena_gpio_free(rdev);
	kfree(rdev->constraints);
wash:
	device_unregister(&rdev->dev);
	/* device core frees rdev */
	rdev = ERR_PTR(ret);
	goto out;

clean:
	kfree(rdev);
	rdev = ERR_PTR(ret);
	goto out;
}
EXPORT_SYMBOL_GPL(regulator_register);

/**
 * regulator_unregister - unregister regulator
 * @rdev: regulator to unregister
 *
 * Called by regulator drivers to unregister a regulator.
*/
void regulator_unregister(struct regulator_dev *rdev)
{
	if (rdev == NULL)
		return;

	if (rdev->supply) {
		/* Drop any enable references still held against our supply. */
		while (rdev->use_count--)
			regulator_disable(rdev->supply);
		regulator_put(rdev->supply);
	}
	mutex_lock(&regulator_list_mutex);
	debugfs_remove_recursive(rdev->debugfs);
	flush_work(&rdev->disable_work.work);
	WARN_ON(rdev->open_count);
	unset_regulator_supplies(rdev);
	list_del(&rdev->list);
	kfree(rdev->constraints);
	regulator_ena_gpio_free(rdev);
	of_node_put(rdev->dev.of_node);
	/* device core frees rdev once the last reference is dropped */
	device_unregister(&rdev->dev);
	mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);

/**
 * regulator_suspend_prepare - prepare regulators for system wide suspend
 * @state: system suspend state
 *
 * Configure each regulator with it's suspend operating parameters for state.
 * This will usually be called by machine suspend code prior to supending.
 */
int regulator_suspend_prepare(suspend_state_t state)
{
	struct regulator_dev *rdev;
	int ret = 0;

	/* ON is handled by regulator active state */
	if (state == PM_SUSPEND_ON)
		return -EINVAL;

	mutex_lock(&regulator_list_mutex);
	list_for_each_entry(rdev, &regulator_list, list) {
		mutex_lock(&rdev->mutex);
		ret = suspend_prepare(rdev, state);
		mutex_unlock(&rdev->mutex);

		if (ret < 0) {
			rdev_err(rdev, "failed to prepare\n");
			goto out;
		}
	}
out:
	mutex_unlock(&regulator_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_suspend_prepare);

/**
 * regulator_suspend_finish - resume regulators from system wide suspend
 *
 * Turn on regulators that might be turned off by regulator_suspend_prepare
 * and that should be turned on according to the regulators properties.
*/
int regulator_suspend_finish(void)
{
	struct regulator_dev *rdev;
	int ret = 0, error;

	mutex_lock(&regulator_list_mutex);
	list_for_each_entry(rdev, &regulator_list, list) {
		mutex_lock(&rdev->mutex);
		/*
		 * NOTE(review): dereferences rdev->constraints without a
		 * NULL check — assumes every registered rdev has
		 * constraints set; confirm against set_machine_constraints.
		 */
		if (rdev->use_count > 0 || rdev->constraints->always_on) {
			error = _regulator_do_enable(rdev);
			if (error)
				ret = error;
		} else {
			/* Only power down when full constraints are known. */
			if (!have_full_constraints())
				goto unlock;
			if (!_regulator_is_enabled(rdev))
				goto unlock;

			error = _regulator_do_disable(rdev);
			if (error)
				ret = error;
		}
unlock:
		mutex_unlock(&rdev->mutex);
	}
	mutex_unlock(&regulator_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_suspend_finish);

/**
 * regulator_has_full_constraints - the system has fully specified constraints
 *
 * Calling this function will cause the regulator API to disable all
 * regulators which have a zero use count and don't have an always_on
 * constraint in a late_initcall.
 *
 * The intention is that this will become the default behaviour in a
 * future kernel release so users are encouraged to use this facility
 * now.
 */
void regulator_has_full_constraints(void)
{
	has_full_constraints = 1;
}
EXPORT_SYMBOL_GPL(regulator_has_full_constraints);

/**
 * rdev_get_drvdata - get rdev regulator driver data
 * @rdev: regulator
 *
 * Get rdev regulator driver private data. This call can be used in the
 * regulator driver context.
 */
void *rdev_get_drvdata(struct regulator_dev *rdev)
{
	return rdev->reg_data;
}
EXPORT_SYMBOL_GPL(rdev_get_drvdata);

/**
 * regulator_get_drvdata - get regulator driver data
 * @regulator: regulator
 *
 * Get regulator driver private data. This call can be used in the consumer
 * driver context when non API regulator specific functions need to be called.
*/
void *regulator_get_drvdata(struct regulator *regulator)
{
	return regulator->rdev->reg_data;
}
EXPORT_SYMBOL_GPL(regulator_get_drvdata);

/**
 * regulator_set_drvdata - set regulator driver data
 * @regulator: regulator
 * @data: data
 */
void regulator_set_drvdata(struct regulator *regulator, void *data)
{
	regulator->rdev->reg_data = data;
}
EXPORT_SYMBOL_GPL(regulator_set_drvdata);

/**
 * rdev_get_id - get regulator ID
 * @rdev: regulator
 */
int rdev_get_id(struct regulator_dev *rdev)
{
	return rdev->desc->id;
}
EXPORT_SYMBOL_GPL(rdev_get_id);

/* Return the struct device embedded in the regulator device. */
struct device *rdev_get_dev(struct regulator_dev *rdev)
{
	return &rdev->dev;
}
EXPORT_SYMBOL_GPL(rdev_get_dev);

/* Return the driver-private data attached to the init data. */
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
{
	return reg_init_data->driver_data;
}
EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);

#ifdef CONFIG_DEBUG_FS
/*
 * debugfs "supply_map" read: dump every consumer mapping as
 * "<regulator> -> <dev_name>.<supply>", capped at one page.
 */
static ssize_t supply_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct regulator_map *map;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(map, &regulator_map_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret,
			       "%s -> %s.%s\n",
			       rdev_get_name(map->regulator), map->dev_name,
			       map->supply);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}
#endif

static const struct file_operations supply_map_fops = {
#ifdef CONFIG_DEBUG_FS
	.read = supply_map_read_file,
	.llseek = default_llseek,
#endif
};

/* Core bring-up: register the class, create debugfs, add dummy regulator. */
static int __init regulator_init(void)
{
	int ret;

	ret = class_register(&regulator_class);

	debugfs_root = debugfs_create_dir("regulator", NULL);
	if (!debugfs_root)
		pr_warn("regulator: Failed to create debugfs directory\n");

	debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
			    &supply_map_fops);

	regulator_dummy_init();

	return ret;
}

/* init early to allow our consumers to complete system booting */
core_initcall(regulator_init);

/*
 * Late boot pass: once all drivers have probed, power down regulators
 * that are enabled but unused, provided constraints permit it.
 */
static int __init regulator_init_complete(void)
{
	struct regulator_dev *rdev;
	const struct regulator_ops *ops;
	struct regulation_constraints *c;
	int enabled, ret;

	/*
	 * Since DT doesn't provide an idiomatic mechanism for
	 * enabling full constraints and since it's much more natural
	 * with DT to provide them just assume that a DT enabled
	 * system has full constraints.
	 */
	if (of_have_populated_dt())
		has_full_constraints = true;

	mutex_lock(&regulator_list_mutex);

	/* If we have a full configuration then disable any regulators
	 * we have permission to change the status for and which are
	 * not in use or always_on.  This is effectively the default
	 * for DT and ACPI as they have full constraints.
	 */
	list_for_each_entry(rdev, &regulator_list, list) {
		ops = rdev->desc->ops;
		c = rdev->constraints;

		if (c && c->always_on)
			continue;

		if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
			continue;

		mutex_lock(&rdev->mutex);

		if (rdev->use_count)
			goto unlock;

		/* If we can't read the status assume it's on. */
		if (ops->is_enabled)
			enabled = ops->is_enabled(rdev);
		else
			enabled = 1;

		if (!enabled)
			goto unlock;

		if (have_full_constraints()) {
			/* We log since this may kill the system if it
			 * goes wrong. */
			rdev_info(rdev, "disabling\n");
			ret = _regulator_do_disable(rdev);
			if (ret != 0)
				rdev_err(rdev, "couldn't disable: %d\n", ret);
		} else {
			/* The intention is that in future we will
			 * assume that full constraints are provided
			 * so warn even if we aren't going to do
			 * anything here.
			 */
			rdev_warn(rdev, "incomplete constraints, leaving on\n");
		}

unlock:
		mutex_unlock(&rdev->mutex);
	}

	mutex_unlock(&regulator_list_mutex);

	return 0;
}
late_initcall_sync(regulator_init_complete);
./CrossVul/dataset_final_sorted/CWE-416/c/good_2440_0
crossvul-cpp_data_bad_5021_7
/* * IPv6 BSD socket options interface * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/net/ipv4/ip_sockglue.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * FIXME: Make the setsockopt code POSIX compliant: That is * * o Truncate getsockopt returns * o Return an optlen of the truncated length if need be * * Changes: * David L Stevens <dlstevens@us.ibm.com>: * - added multicast source filtering API for MLDv2 */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/mroute6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/netfilter.h> #include <linux/slab.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/inet_common.h> #include <net/tcp.h> #include <net/udp.h> #include <net/udplite.h> #include <net/xfrm.h> #include <net/compat.h> #include <asm/uaccess.h> struct ip6_ra_chain *ip6_ra_chain; DEFINE_RWLOCK(ip6_ra_lock); int ip6_ra_control(struct sock *sk, int sel) { struct ip6_ra_chain *ra, *new_ra, **rap; /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) return -ENOPROTOOPT; new_ra = (sel >= 0) ? 
kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; write_lock_bh(&ip6_ra_lock); for (rap = &ip6_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { if (ra->sk == sk) { if (sel >= 0) { write_unlock_bh(&ip6_ra_lock); kfree(new_ra); return -EADDRINUSE; } *rap = ra->next; write_unlock_bh(&ip6_ra_lock); sock_put(sk); kfree(ra); return 0; } } if (!new_ra) { write_unlock_bh(&ip6_ra_lock); return -ENOBUFS; } new_ra->sk = sk; new_ra->sel = sel; new_ra->next = ra; *rap = new_ra; sock_hold(sk); write_unlock_bh(&ip6_ra_lock); return 0; } static struct ipv6_txoptions *ipv6_update_options(struct sock *sk, struct ipv6_txoptions *opt) { if (inet_sk(sk)->is_icsk) { if (opt && !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } } opt = xchg(&inet6_sk(sk)->opt, opt); sk_dst_reset(sk); return opt; } static bool setsockopt_needs_rtnl(int optname) { switch (optname) { case IPV6_ADD_MEMBERSHIP: case IPV6_DROP_MEMBERSHIP: case IPV6_JOIN_ANYCAST: case IPV6_LEAVE_ANYCAST: case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: case MCAST_MSFILTER: return true; } return false; } static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int val, valbool; int retv = -ENOPROTOOPT; bool needs_rtnl = setsockopt_needs_rtnl(optname); if (!optval) val = 0; else { if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; } else val = 0; } valbool = (val != 0); if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IPV6_ADDRFORM: if (optlen < 
sizeof(int)) goto e_inval; if (val == PF_INET) { struct ipv6_txoptions *opt; struct sk_buff *pktopt; if (sk->sk_type == SOCK_RAW) break; if (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_UDPLITE) { struct udp_sock *up = udp_sk(sk); if (up->pending == AF_INET6) { retv = -EBUSY; break; } } else if (sk->sk_protocol != IPPROTO_TCP) break; if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; } if (ipv6_only_sock(sk) || !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { retv = -EADDRNOTAVAIL; break; } fl6_free_socklist(sk); ipv6_sock_mc_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so * remove it from the refcnt debug socks count in the * original family... */ sk_refcnt_debug_dec(sk); if (sk->sk_protocol == IPPROTO_TCP) { struct inet_connection_sock *icsk = inet_csk(sk); local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); local_bh_enable(); sk->sk_prot = &tcp_prot; icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } else { struct proto *prot = &udp_prot; if (sk->sk_protocol == IPPROTO_UDPLITE) prot = &udplite_prot; local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); local_bh_enable(); sk->sk_prot = prot; sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } opt = xchg(&np->opt, NULL); if (opt) sock_kfree_s(sk, opt, opt->tot_len); pktopt = xchg(&np->pktoptions, NULL); kfree_skb(pktopt); sk->sk_destruct = inet_sock_destruct; /* * ... and add it to the refcnt debug socks count * in the new family. 
-acme */ sk_refcnt_debug_inc(sk); module_put(THIS_MODULE); retv = 0; break; } goto e_inval; case IPV6_V6ONLY: if (optlen < sizeof(int) || inet_sk(sk)->inet_num) goto e_inval; sk->sk_ipv6only = valbool; retv = 0; break; case IPV6_RECVPKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxinfo = valbool; retv = 0; break; case IPV6_2292PKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxoinfo = valbool; retv = 0; break; case IPV6_RECVHOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxhlim = valbool; retv = 0; break; case IPV6_2292HOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxohlim = valbool; retv = 0; break; case IPV6_RECVRTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.srcrt = valbool; retv = 0; break; case IPV6_2292RTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.osrcrt = valbool; retv = 0; break; case IPV6_RECVHOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.hopopts = valbool; retv = 0; break; case IPV6_2292HOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.ohopopts = valbool; retv = 0; break; case IPV6_RECVDSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.dstopts = valbool; retv = 0; break; case IPV6_2292DSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.odstopts = valbool; retv = 0; break; case IPV6_TCLASS: if (optlen < sizeof(int)) goto e_inval; if (val < -1 || val > 0xff) goto e_inval; /* RFC 3542, 6.5: default traffic class of 0x0 */ if (val == -1) val = 0; np->tclass = val; retv = 0; break; case IPV6_RECVTCLASS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxtclass = valbool; retv = 0; break; case IPV6_FLOWINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxflow = valbool; retv = 0; break; case IPV6_RECVPATHMTU: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxpmtu = valbool; retv = 0; break; case IPV6_TRANSPARENT: if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) && 
!ns_capable(net->user_ns, CAP_NET_RAW)) { retv = -EPERM; break; } if (optlen < sizeof(int)) goto e_inval; /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */ inet_sk(sk)->transparent = valbool; retv = 0; break; case IPV6_RECVORIGDSTADDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxorigdstaddr = valbool; retv = 0; break; case IPV6_HOPOPTS: case IPV6_RTHDRDSTOPTS: case IPV6_RTHDR: case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; /* remove any sticky options header with a zero option * length, per RFC3542. */ if (optlen == 0) optval = NULL; else if (!optval) goto e_inval; else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; /* hop-by-hop / destination options are privileged option */ retv = -EPERM; if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) break; opt = ipv6_renew_options(sk, np->opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; } /* routing header option needs extra check */ retv = -EINVAL; if (optname == IPV6_RTHDR && opt && opt->srcrt) { struct ipv6_rt_hdr *rthdr = opt->srcrt; switch (rthdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (rthdr->hdrlen != 2 || rthdr->segments_left != 1) goto sticky_done; break; #endif default: goto sticky_done; } } retv = 0; opt = ipv6_update_options(sk, opt); sticky_done: if (opt) sock_kfree_s(sk, opt, opt->tot_len); break; } case IPV6_PKTINFO: { struct in6_pktinfo pkt; if (optlen == 0) goto e_inval; else if (optlen < sizeof(struct in6_pktinfo) || !optval) goto e_inval; if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { retv = -EFAULT; break; } if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if) goto e_inval; np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex; np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr; retv = 0; break; } case IPV6_2292PKTOPTIONS: { struct ipv6_txoptions *opt = NULL; struct msghdr msg; 
struct flowi6 fl6; int junk; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; if (optlen == 0) goto update; /* 1K is probably excessive * 1K is surely not enough, 2K per standard header is 16K. */ retv = -EINVAL; if (optlen > 64*1024) break; opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL); retv = -ENOBUFS; if (!opt) break; memset(opt, 0, sizeof(*opt)); opt->tot_len = sizeof(*opt) + optlen; retv = -EFAULT; if (copy_from_user(opt+1, optval, optlen)) goto done; msg.msg_controllen = optlen; msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); if (retv) goto done; update: retv = 0; opt = ipv6_update_options(sk, opt); done: if (opt) sock_kfree_s(sk, opt, opt->tot_len); break; } case IPV6_UNICAST_HOPS: if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->hop_limit = val; retv = 0; break; case IPV6_MULTICAST_HOPS: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->mcast_hops = (val == -1 ? 
IPV6_DEFAULT_MCASTHOPS : val); retv = 0; break; case IPV6_MULTICAST_LOOP: if (optlen < sizeof(int)) goto e_inval; if (val != valbool) goto e_inval; np->mc_loop = valbool; retv = 0; break; case IPV6_UNICAST_IF: { struct net_device *dev = NULL; int ifindex; if (optlen != sizeof(int)) goto e_inval; ifindex = (__force int)ntohl((__force __be32)val); if (ifindex == 0) { np->ucast_oif = 0; retv = 0; break; } dev = dev_get_by_index(net, ifindex); retv = -EADDRNOTAVAIL; if (!dev) break; dev_put(dev); retv = -EINVAL; if (sk->sk_bound_dev_if) break; np->ucast_oif = ifindex; retv = 0; break; } case IPV6_MULTICAST_IF: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val) { struct net_device *dev; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) goto e_inval; dev = dev_get_by_index(net, val); if (!dev) { retv = -ENODEV; break; } dev_put(dev); } np->mcast_oif = val; retv = 0; break; case IPV6_ADD_MEMBERSHIP: case IPV6_DROP_MEMBERSHIP: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EPROTO; if (inet_sk(sk)->is_icsk) break; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_ADD_MEMBERSHIP) retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); else retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); break; } case IPV6_JOIN_ANYCAST: case IPV6_LEAVE_ANYCAST: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_JOIN_ANYCAST) retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); else retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); break; } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { struct group_req greq; struct sockaddr_in6 *psin6; if (optlen < sizeof(struct group_req)) goto e_inval; retv = -EFAULT; if (copy_from_user(&greq, optval, 
sizeof(struct group_req))) break; if (greq.gr_group.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } psin6 = (struct sockaddr_in6 *)&greq.gr_group; if (optname == MCAST_JOIN_GROUP) retv = ipv6_sock_mc_join(sk, greq.gr_interface, &psin6->sin6_addr); else retv = ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); break; } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: { struct group_source_req greqs; int omode, add; if (optlen < sizeof(struct group_source_req)) goto e_inval; if (copy_from_user(&greqs, optval, sizeof(greqs))) { retv = -EFAULT; break; } if (greqs.gsr_group.ss_family != AF_INET6 || greqs.gsr_source.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } if (optname == MCAST_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == MCAST_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, &psin6->sin6_addr); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } retv = ip6_mc_source(add, omode, sk, &greqs); break; } case MCAST_MSFILTER: { struct group_filter *gsf; if (optlen < GROUP_FILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { retv = -ENOBUFS; break; } gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { retv = -ENOBUFS; break; } retv = -EFAULT; if (copy_from_user(gsf, optval, optlen)) { kfree(gsf); break; } /* numsrc >= (4G-140)/128 overflow in 32 bits */ if (gsf->gf_numsrc >= 0x1ffffffU || gsf->gf_numsrc > sysctl_mld_max_msf) { kfree(gsf); retv = -ENOBUFS; break; } if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { kfree(gsf); retv = -EINVAL; break; } retv = ip6_mc_msfilter(sk, gsf); kfree(gsf); break; } case IPV6_ROUTER_ALERT: if (optlen < 
sizeof(int)) goto e_inval; retv = ip6_ra_control(sk, val); break; case IPV6_MTU_DISCOVER: if (optlen < sizeof(int)) goto e_inval; if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT) goto e_inval; np->pmtudisc = val; retv = 0; break; case IPV6_MTU: if (optlen < sizeof(int)) goto e_inval; if (val && val < IPV6_MIN_MTU) goto e_inval; np->frag_size = val; retv = 0; break; case IPV6_RECVERR: if (optlen < sizeof(int)) goto e_inval; np->recverr = valbool; if (!val) skb_queue_purge(&sk->sk_error_queue); retv = 0; break; case IPV6_FLOWINFO_SEND: if (optlen < sizeof(int)) goto e_inval; np->sndflow = valbool; retv = 0; break; case IPV6_FLOWLABEL_MGR: retv = ipv6_flowlabel_opt(sk, optval, optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: retv = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; retv = xfrm_user_policy(sk, optname, optval, optlen); break; case IPV6_ADDR_PREFERENCES: { unsigned int pref = 0; unsigned int prefmask = ~0; if (optlen < sizeof(int)) goto e_inval; retv = -EINVAL; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP| IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { case IPV6_PREFER_SRC_PUBLIC: pref |= IPV6_PREFER_SRC_PUBLIC; break; case IPV6_PREFER_SRC_TMP: pref |= IPV6_PREFER_SRC_TMP; break; case IPV6_PREFER_SRC_PUBTMP_DEFAULT: break; case 0: goto pref_skip_pubtmp; default: goto e_inval; } prefmask &= ~(IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP); pref_skip_pubtmp: /* check HOME/COA conflicts */ switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) { case IPV6_PREFER_SRC_HOME: break; case IPV6_PREFER_SRC_COA: pref |= IPV6_PREFER_SRC_COA; case 0: goto pref_skip_coa; default: goto e_inval; } prefmask &= ~IPV6_PREFER_SRC_COA; pref_skip_coa: /* check CGA/NONCGA conflicts */ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { case IPV6_PREFER_SRC_CGA: case IPV6_PREFER_SRC_NONCGA: case 0: break; default: goto e_inval; } np->srcprefs = (np->srcprefs & prefmask) | pref; retv = 
/* NOTE(review): this span is the tail of do_ipv6_setsockopt(); its opening
 * half lies on earlier lines. Code tokens below are unchanged — only
 * comments/formatting were added. */
	0;	/* completes "retv = 0;" for the IPV6_ADDR_PREFERENCES case */
		break;
	}

	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		/* min hop count is an 8-bit field; unlike UNICAST_HOPS,
		 * -1 ("use default") is not accepted here */
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		retv = 0;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	case IPV6_AUTOFLOWLABEL:
		np->autoflowlabel = valbool;
		retv = 0;
		break;
	}

	/* common exit: drop socket lock (and rtnl, if this option needed it) */
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();

	return retv;

e_inval:
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return -EINVAL;
}

/*
 * ipv6_setsockopt - SOL_IPV6 setsockopt() entry point.
 *
 * SOL_IP on a non-raw IPv6 socket is forwarded to the UDP protocol handler;
 * unrecognised SOL_IPV6 options fall through to netfilter (nf_setsockopt)
 * when netfilter is built in.  Returns 0 or a negative errno.
 */
int ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	int err;

	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (level != SOL_IPV6)
		return -ENOPROTOOPT;

	err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
	    optname != IPV6_XFRM_POLICY) {
		lock_sock(sk);
		err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(ipv6_setsockopt);

#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point.  The MCAST_* group options carry structures
 * whose layout differs between 32- and 64-bit ABIs, so they are translated
 * via compat_mc_setsockopt() before reaching ipv6_setsockopt().
 */
int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	int err;

	if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
		if (udp_prot.compat_setsockopt != NULL)
			return udp_prot.compat_setsockopt(sk, level, optname,
							  optval, optlen);
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
	}

	if (level != SOL_IPV6)
		return -ENOPROTOOPT;

	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
			ipv6_setsockopt);

	err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
	    optname != IPV6_XFRM_POLICY) {
		lock_sock(sk);
		err = compat_nf_setsockopt(sk, PF_INET6, optname,
					   optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ipv6_setsockopt);
#endif

/*
 * Copy one "sticky" extension header (hop-by-hop, routing header, or
 * destination options) out to userspace for getsockopt().
 *
 * @opt is normally np->opt; the caller must hold the socket lock while this
 * runs.  Returns the number of bytes copied (possibly truncated to @len),
 * 0 if no such header is set, -EINVAL for an unexpected optname, or
 * -EFAULT on copy failure.
 *
 * NOTE(review): @opt is only protected by lock_sock() here.  This file is
 * flagged for CWE-416; upstream later put np->opt under RCU with a
 * refcounted ipv6_txoptions (commit 45f6fad84cc3, CVE-2016-3841) precisely
 * because a concurrent ipv6_update_options() could free the struct while a
 * reader held a plain pointer — verify this tree carries that protection.
 */
static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
				  int optname, char __user *optval, int len)
{
	struct ipv6_opt_hdr *hdr;

	if (!opt)
		return 0;

	switch (optname) {
	case IPV6_HOPOPTS:
		hdr = opt->hopopt;
		break;
	case IPV6_RTHDRDSTOPTS:
		hdr = opt->dst0opt;
		break;
	case IPV6_RTHDR:
		hdr = (struct ipv6_opt_hdr *)opt->srcrt;
		break;
	case IPV6_DSTOPTS:
		hdr = opt->dst1opt;
		break;
	default:
		return -EINVAL;	/* should not happen */
	}

	if (!hdr)
		return 0;

	/* ipv6_optlen() derives the on-wire length from hdr->hdrlen */
	len = min_t(unsigned int, len, ipv6_optlen(hdr));
	if (copy_to_user(optval, hdr, len))
		return -EFAULT;
	return len;
}

/*
 * Core SOL_IPV6 getsockopt() dispatcher.  Reads the user's buffer length
 * from @optlen, computes @val (or copies structured results directly for
 * the cases that return early), and writes the result back at the bottom
 * of the switch (which continues on later lines of this file).
 */
static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen, unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	int len;
	int val;

	/* multicast-routing options have their own handler */
	if (ip6_mroute_opt(optname))
		return ip6_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	switch (optname) {
	case IPV6_ADDRFORM:
		if (sk->sk_protocol != IPPROTO_UDP &&
		    sk->sk_protocol != IPPROTO_UDPLITE &&
		    sk->sk_protocol != IPPROTO_TCP)
			return -ENOPROTOOPT;
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		val = sk->sk_family;
		break;
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;
		int err;

		if (len < GROUP_FILTER_SIZE(0))
			return -EINVAL;
		/* only the fixed-size head is read here; ip6_mc_msfget()
		 * copies the variable-length source list itself */
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0)))
			return -EFAULT;
		if (gsf.gf_group.ss_family != AF_INET6)
			return -EADDRNOTAVAIL;
		lock_sock(sk);
		err = ip6_mc_msfget(sk, &gsf,
			(struct group_filter __user *)optval, optlen);
		release_sock(sk);
		return err;
	}
	case IPV6_2292PKTOPTIONS:
	{
		struct msghdr msg;
		struct sk_buff *skb;

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		/* emit ancillary data into the user's option buffer */
		msg.msg_control = optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		lock_sock(sk);
		skb = np->pktoptions;
		if (skb)
			ip6_datagram_recv_ctl(sk, &msg, skb);
		release_sock(sk);
		if (!skb) {
			/* no queued packet options: synthesize cmsgs from
			 * the socket's sticky/receive state instead */
			if (np->rxopt.bits.rxinfo) {
				struct in6_pktinfo src_info;
				src_info.ipi6_ifindex = np->mcast_oif ? /* continues on the next source line */
np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr; put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxhlim) { int hlim = np->mcast_hops; put_cmsg(&msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim); } if (np->rxopt.bits.rxtclass) { int tclass = (int)ip6_tclass(np->rcv_flowinfo); put_cmsg(&msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); } if (np->rxopt.bits.rxoinfo) { struct in6_pktinfo src_info; src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr; put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { int hlim = np->mcast_hops; put_cmsg(&msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim); } if (np->rxopt.bits.rxflow) { __be32 flowinfo = np->rcv_flowinfo; put_cmsg(&msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); } } len -= msg.msg_controllen; return put_user(len, optlen); } case IPV6_MTU: { struct dst_entry *dst; val = 0; rcu_read_lock(); dst = __sk_dst_get(sk); if (dst) val = dst_mtu(dst); rcu_read_unlock(); if (!val) return -ENOTCONN; break; } case IPV6_V6ONLY: val = sk->sk_ipv6only; break; case IPV6_RECVPKTINFO: val = np->rxopt.bits.rxinfo; break; case IPV6_2292PKTINFO: val = np->rxopt.bits.rxoinfo; break; case IPV6_RECVHOPLIMIT: val = np->rxopt.bits.rxhlim; break; case IPV6_2292HOPLIMIT: val = np->rxopt.bits.rxohlim; break; case IPV6_RECVRTHDR: val = np->rxopt.bits.srcrt; break; case IPV6_2292RTHDR: val = np->rxopt.bits.osrcrt; break; case IPV6_HOPOPTS: case IPV6_RTHDRDSTOPTS: case IPV6_RTHDR: case IPV6_DSTOPTS: { lock_sock(sk); len = ipv6_getsockopt_sticky(sk, np->opt, optname, optval, len); release_sock(sk); /* check if ipv6_getsockopt_sticky() returns err code */ if (len < 0) return len; return put_user(len, optlen); } case IPV6_RECVHOPOPTS: val = 
np->rxopt.bits.hopopts; break; case IPV6_2292HOPOPTS: val = np->rxopt.bits.ohopopts; break; case IPV6_RECVDSTOPTS: val = np->rxopt.bits.dstopts; break; case IPV6_2292DSTOPTS: val = np->rxopt.bits.odstopts; break; case IPV6_TCLASS: val = np->tclass; break; case IPV6_RECVTCLASS: val = np->rxopt.bits.rxtclass; break; case IPV6_FLOWINFO: val = np->rxopt.bits.rxflow; break; case IPV6_RECVPATHMTU: val = np->rxopt.bits.rxpmtu; break; case IPV6_PATHMTU: { struct dst_entry *dst; struct ip6_mtuinfo mtuinfo; if (len < sizeof(mtuinfo)) return -EINVAL; len = sizeof(mtuinfo); memset(&mtuinfo, 0, sizeof(mtuinfo)); rcu_read_lock(); dst = __sk_dst_get(sk); if (dst) mtuinfo.ip6m_mtu = dst_mtu(dst); rcu_read_unlock(); if (!mtuinfo.ip6m_mtu) return -ENOTCONN; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &mtuinfo, len)) return -EFAULT; return 0; } case IPV6_TRANSPARENT: val = inet_sk(sk)->transparent; break; case IPV6_RECVORIGDSTADDR: val = np->rxopt.bits.rxorigdstaddr; break; case IPV6_UNICAST_HOPS: case IPV6_MULTICAST_HOPS: { struct dst_entry *dst; if (optname == IPV6_UNICAST_HOPS) val = np->hop_limit; else val = np->mcast_hops; if (val < 0) { rcu_read_lock(); dst = __sk_dst_get(sk); if (dst) val = ip6_dst_hoplimit(dst); rcu_read_unlock(); } if (val < 0) val = sock_net(sk)->ipv6.devconf_all->hop_limit; break; } case IPV6_MULTICAST_LOOP: val = np->mc_loop; break; case IPV6_MULTICAST_IF: val = np->mcast_oif; break; case IPV6_UNICAST_IF: val = (__force int)htonl((__u32) np->ucast_oif); break; case IPV6_MTU_DISCOVER: val = np->pmtudisc; break; case IPV6_RECVERR: val = np->recverr; break; case IPV6_FLOWINFO_SEND: val = np->sndflow; break; case IPV6_FLOWLABEL_MGR: { struct in6_flowlabel_req freq; int flags; if (len < sizeof(freq)) return -EINVAL; if (copy_from_user(&freq, optval, sizeof(freq))) return -EFAULT; if (freq.flr_action != IPV6_FL_A_GET) return -EINVAL; len = sizeof(freq); flags = freq.flr_flags; memset(&freq, 0, sizeof(freq)); val = 
ipv6_flowlabel_opt_get(sk, &freq, flags); if (val < 0) return val; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &freq, len)) return -EFAULT; return 0; } case IPV6_ADDR_PREFERENCES: val = 0; if (np->srcprefs & IPV6_PREFER_SRC_TMP) val |= IPV6_PREFER_SRC_TMP; else if (np->srcprefs & IPV6_PREFER_SRC_PUBLIC) val |= IPV6_PREFER_SRC_PUBLIC; else { /* XXX: should we return system default? */ val |= IPV6_PREFER_SRC_PUBTMP_DEFAULT; } if (np->srcprefs & IPV6_PREFER_SRC_COA) val |= IPV6_PREFER_SRC_COA; else val |= IPV6_PREFER_SRC_HOME; break; case IPV6_MINHOPCOUNT: val = np->min_hopcount; break; case IPV6_DONTFRAG: val = np->dontfrag; break; case IPV6_AUTOFLOWLABEL: val = np->autoflowlabel; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.getsockopt(sk, level, optname, optval, optlen); if (level != SOL_IPV6) return -ENOPROTOOPT; err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); } #endif return err; } EXPORT_SYMBOL(ipv6_getsockopt); #ifdef CONFIG_COMPAT int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int err; if (level == SOL_IP && sk->sk_type != SOCK_RAW) { if (udp_prot.compat_getsockopt != NULL) return udp_prot.compat_getsockopt(sk, level, optname, optval, optlen); return udp_prot.getsockopt(sk, level, optname, optval, 
optlen); } if (level != SOL_IPV6) return -ENOPROTOOPT; if (optname == MCAST_MSFILTER) return compat_mc_getsockopt(sk, level, optname, optval, optlen, ipv6_getsockopt); err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { int len; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len); release_sock(sk); if (err >= 0) err = put_user(len, optlen); } #endif return err; } EXPORT_SYMBOL(compat_ipv6_getsockopt); #endif
./CrossVul/dataset_final_sorted/CWE-416/c/bad_5021_7
crossvul-cpp_data_good_282_0
/* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/completion.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <rdma/rdma_user_cm.h> #include <rdma/ib_marshall.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, .maxlen = sizeof max_backlog, .mode = 0644, .proc_handler = proc_dointvec, }, { } }; struct ucma_file { struct mutex mut; struct file *filp; struct list_head ctx_list; struct list_head event_list; wait_queue_head_t poll_wait; struct workqueue_struct *close_wq; }; struct ucma_context { int id; struct completion comp; atomic_t ref; int events_reported; int backlog; struct ucma_file *file; struct rdma_cm_id *cm_id; u64 uid; struct list_head list; struct list_head mc_list; /* mark that device is in process of destroying the internal HW * resources, protected by the global mut */ int closing; /* sync between removal event and id destroy, protected by file mut */ int destroying; struct work_struct close_work; }; struct ucma_multicast { struct ucma_context *ctx; int id; int events_reported; u64 uid; u8 join_state; struct list_head list; struct sockaddr_storage addr; }; struct ucma_event { struct ucma_context *ctx; struct ucma_multicast *mc; struct list_head list; struct rdma_cm_id *cm_id; struct rdma_ucm_event_resp resp; struct work_struct close_work; }; static DEFINE_MUTEX(mut); static DEFINE_IDR(ctx_idr); static DEFINE_IDR(multicast_idr); static inline struct ucma_context 
/* (return type "static inline struct ucma_context" is on the previous
 * source line; tokens below are unchanged — comments only)
 *
 * Look up a context by idr id.  Caller must hold the global "mut" mutex.
 * Returns -ENOENT if the id is unknown, -EINVAL if it belongs to another
 * file or has no cm_id yet. */
*_ucma_find_context(int id, struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

/*
 * Look up a context and take a reference on it.  A context already marked
 * ->closing (device-removal teardown in flight) is refused with -EIO so no
 * new work can pin it.  Pair every successful return with ucma_put_ctx().
 */
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

/* Drop a reference; the last put wakes whoever waits on ctx->comp
 * (ucma_close_id / ucma_destroy_id) so teardown can proceed. */
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the
 * CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		/* unbound id: release the ref we just took */
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

/* Workqueue callback: destroy the cm_id embedded in a detached
 * connect-request event and free the event itself. */
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

/* Workqueue callback queued by the device-removal path. */
static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive till its explicit destroying
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id.
*/ rdma_destroy_id(ctx->cm_id); } static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) { struct ucma_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); atomic_set(&ctx->ref, 1); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; mutex_lock(&mut); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (ctx->id < 0) goto error; list_add_tail(&ctx->list, &file->ctx_list); return ctx; error: kfree(ctx); return NULL; } static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc; mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) return NULL; mutex_lock(&mut); mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (mc->id < 0) goto error; mc->ctx = ctx; list_add_tail(&mc->list, &ctx->mc_list); return mc; error: kfree(mc); return NULL; } static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, struct rdma_conn_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; dst->responder_resources =src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; } static void ucma_copy_ud_event(struct ib_device *device, struct rdma_ucm_ud_param *dst, struct rdma_ud_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr); dst->qp_num = src->qp_num; dst->qkey = src->qkey; } static void ucma_set_event_context(struct ucma_context *ctx, struct rdma_cm_event *event, struct ucma_event *uevent) { uevent->ctx = ctx; switch (event->event) { case 
RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: uevent->mc = (struct ucma_multicast *) event->param.ud.private_data; uevent->resp.uid = uevent->mc->uid; uevent->resp.id = uevent->mc->id; break; default: uevent->resp.uid = ctx->uid; uevent->resp.id = ctx->id; break; } } /* Called with file->mut locked for the relevant context. */ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id) { struct ucma_context *ctx = cm_id->context; struct ucma_event *con_req_eve; int event_found = 0; if (ctx->destroying) return; /* only if context is pointing to cm_id that it owns it and can be * queued to be closed, otherwise that cm_id is an inflight one that * is part of that context event list pending to be detached and * reattached to its new context as part of ucma_get_event, * handled separately below. */ if (ctx->cm_id == cm_id) { mutex_lock(&mut); ctx->closing = 1; mutex_unlock(&mut); queue_work(ctx->file->close_wq, &ctx->close_work); return; } list_for_each_entry(con_req_eve, &ctx->file->event_list, list) { if (con_req_eve->cm_id == cm_id && con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { list_del(&con_req_eve->list); INIT_WORK(&con_req_eve->close_work, ucma_close_event_id); queue_work(ctx->file->close_wq, &con_req_eve->close_work); event_found = 1; break; } } if (!event_found) pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n"); } static int ucma_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_event *uevent; struct ucma_context *ctx = cm_id->context; int ret = 0; uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); if (!uevent) return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; mutex_lock(&ctx->file->mut); uevent->cm_id = cm_id; ucma_set_event_context(ctx, event, uevent); uevent->resp.event = event->event; uevent->resp.status = event->status; if (cm_id->qp_type == IB_QPT_UD) ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud, &event->param.ud); else 
ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; kfree(uevent); goto out; } ctx->backlog--; } else if (!ctx->uid || ctx->cm_id != cm_id) { /* * We ignore events for new connections until userspace has set * their context. This can only happen if an error occurs on a * new connection before the user accepts it. This is okay, * since the accept will just fail later. However, we do need * to release the underlying HW resources in case of a device * removal event. */ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); kfree(uevent); goto out; } list_add_tail(&uevent->list, &ctx->file->event_list); wake_up_interruptible(&ctx->file->poll_wait); if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); out: mutex_unlock(&ctx->file->mut); return ret; } static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct ucma_context *ctx; struct rdma_ucm_get_event cmd; struct ucma_event *uevent; int ret = 0; /* * Old 32 bit user space does not send the 4 byte padding in the * reserved field. We don't care, allow it to keep working. 
*/ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->mut); while (list_empty(&file->event_list)) { mutex_unlock(&file->mut); if (file->filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->poll_wait, !list_empty(&file->event_list))) return -ERESTARTSYS; mutex_lock(&file->mut); } uevent = list_entry(file->event_list.next, struct ucma_event, list); if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { ctx = ucma_alloc_ctx(file); if (!ctx) { ret = -ENOMEM; goto done; } uevent->ctx->backlog++; ctx->cm_id = uevent->cm_id; ctx->cm_id->context = ctx; uevent->resp.id = ctx->id; } if (copy_to_user(u64_to_user_ptr(cmd.response), &uevent->resp, min_t(size_t, out_len, sizeof(uevent->resp)))) { ret = -EFAULT; goto done; } list_del(&uevent->list); uevent->ctx->events_reported++; if (uevent->mc) uevent->mc->events_reported++; kfree(uevent); done: mutex_unlock(&file->mut); return ret; } static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) { switch (cmd->ps) { case RDMA_PS_TCP: *qp_type = IB_QPT_RC; return 0; case RDMA_PS_UDP: case RDMA_PS_IPOIB: *qp_type = IB_QPT_UD; return 0; case RDMA_PS_IB: *qp_type = cmd->qp_type; return 0; default: return -EINVAL; } } static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ret = ucma_get_qp_type(&cmd, &qp_type); if (ret) return ret; mutex_lock(&file->mut); ctx = ucma_alloc_ctx(file); mutex_unlock(&file->mut); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; cm_id = __rdma_create_id(current->nsproxy->net_ns, ucma_event_handler, ctx, cmd.ps, qp_type, NULL); if 
(IS_ERR(cm_id)) { ret = PTR_ERR(cm_id); goto err1; } resp.id = ctx->id; if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { ret = -EFAULT; goto err2; } ctx->cm_id = cm_id; return 0; err2: rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); mutex_lock(&file->mut); list_del(&ctx->list); mutex_unlock(&file->mut); kfree(ctx); return ret; } static void ucma_cleanup_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc, *tmp; mutex_lock(&mut); list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { list_del(&mc->list); idr_remove(&multicast_idr, mc->id); kfree(mc); } mutex_unlock(&mut); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { if (uevent->mc != mc) continue; list_del(&uevent->list); kfree(uevent); } } /* * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At * this point, no new events will be reported from the hardware. However, we * still need to cleanup the UCMA context for this ID. Specifically, there * might be events that have not yet been consumed by the user space software. * These might include pending connect requests which we have not completed * processing. We cannot call rdma_destroy_id while holding the lock of the * context (file->mut), as it might cause a deadlock. We therefore extract all * relevant events from the context pending events list while holding the * mutex. After that we release them as needed. */ static int ucma_free_ctx(struct ucma_context *ctx) { int events_reported; struct ucma_event *uevent, *tmp; LIST_HEAD(list); ucma_cleanup_multicast(ctx); /* Cleanup events not yet reported to the user. 
*/ mutex_lock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { if (uevent->ctx == ctx) list_move_tail(&uevent->list, &list); } list_del(&ctx->list); mutex_unlock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &list, list) { list_del(&uevent->list); if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) rdma_destroy_id(uevent->cm_id); kfree(uevent); } events_reported = ctx->events_reported; kfree(ctx); return events_reported; } static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_context *ctx; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&mut); ctx = _ucma_find_context(cmd.id, file); if (!IS_ERR(ctx)) idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->file->mut); ctx->destroying = 1; mutex_unlock(&ctx->file->mut); flush_workqueue(ctx->file->close_wq); /* At this point it's guaranteed that there is no inflight * closing task */ mutex_lock(&mut); if (!ctx->closing) { mutex_unlock(&mut); ucma_put_ctx(ctx); wait_for_completion(&ctx->comp); rdma_destroy_id(ctx->cm_id); } else { mutex_unlock(&mut); } resp.events_reported = ucma_free_ctx(ctx); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_in6(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_bind(struct ucma_file *file, const char __user 
*inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || !cmd.addr_size || cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_route cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = 
rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, (union ib_gid *)&resp->ib_route[0].dgid); rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); } static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; 
ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (!ctx->cm_id->device) goto out; resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; ucma_put_ctx(ctx); return ret; } static void ucma_query_device_addr(struct rdma_cm_id *cm_id, struct rdma_ucm_query_addr_resp *resp) { if (!cm_id->device) return; resp->node_guid = (__force __u64) cm_id->device->node_guid; resp->port_num = cm_id->port_num; resp->pkey = (__force __u16) cpu_to_be16( ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); } static ssize_t ucma_query_addr(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; resp.src_size = rdma_addr_size(addr); memcpy(&resp.src_addr, addr, resp.src_size); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; resp.dst_size = rdma_addr_size(addr); memcpy(&resp.dst_addr, addr, resp.dst_size); ucma_query_device_addr(ctx->cm_id, &resp); if (copy_to_user(response, &resp, 
sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query_path(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_path_resp *resp; int i, ret = 0; if (out_len < sizeof(*resp)) return -ENOSPC; resp = kzalloc(out_len, GFP_KERNEL); if (!resp) return -ENOMEM; resp->num_paths = ctx->cm_id->route.num_paths; for (i = 0, out_len -= sizeof(*resp); i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); i++, out_len -= sizeof(struct ib_path_rec_data)) { struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); } else { ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } if (copy_to_user(response, resp, sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) ret = -EFAULT; kfree(resp); return ret; } static ssize_t ucma_query_gid(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr_ib *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); ucma_query_device_addr(ctx->cm_id, &resp); addr = (struct sockaddr_ib *) &resp.src_addr; resp.src_size = sizeof(*addr); if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr, NULL); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.src_addr); } addr = (struct sockaddr_ib *) &resp.dst_addr; resp.dst_size = sizeof(*addr); if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force 
__be16) resp.pkey; rdma_read_gids(ctx->cm_id, NULL, (union ib_gid *)&addr->sib_addr); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr); } if (copy_to_user(response, &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct ucma_context *ctx; void __user *response; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; response = u64_to_user_ptr(cmd.response); ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_PATH: ret = ucma_query_path(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_GID: ret = ucma_query_gid(ctx, response, out_len); break; default: ret = -ENOSYS; break; } ucma_put_ctx(ctx); return ret; } static void ucma_copy_conn_param(struct rdma_cm_id *id, struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; dst->responder_resources =src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? 
src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_connect cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!cmd.conn_param.valid) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ret = rdma_connect(ctx->cm_id, &conn_param); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_listen cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? cmd.backlog : max_backlog; ret = rdma_listen(ctx->cm_id, ctx->backlog); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_accept cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); } else ret = __rdma_accept(ctx->cm_id, NULL, NULL); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_reject cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_reject(ctx->cm_id, 
cmd.private_data, cmd.private_data_len); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_disconnect cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_disconnect(ctx->cm_id); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_init_qp_attr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_init_qp_attr cmd; struct ib_uverbs_qp_attr resp; struct ucma_context *ctx; struct ib_qp_attr qp_attr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); if (ret) goto out; ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: ucma_put_ctx(ctx); return ret; } static int ucma_set_option_id(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret = 0; switch (optname) { case RDMA_OPTION_ID_TOS: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; case RDMA_OPTION_ID_REUSEADDR: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_AFONLY: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 
1 : 0); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_ib_path(struct ucma_context *ctx, struct ib_path_rec_data *path_data, size_t optlen) { struct sa_path_rec sa_path; struct rdma_cm_event event; int ret; if (optlen % sizeof(*path_data)) return -EINVAL; for (; optlen; optlen -= sizeof(*path_data), path_data++) { if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL)) break; } if (!optlen) return -EINVAL; if (!ctx->cm_id->device) return -EINVAL; memset(&sa_path, 0, sizeof(sa_path)); sa_path.rec_type = SA_PATH_REC_TYPE_IB; ib_sa_unpack_path(path_data->path_rec, &sa_path); if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) { struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); ret = rdma_set_ib_path(ctx->cm_id, &opa); } else { ret = rdma_set_ib_path(ctx->cm_id, &sa_path); } if (ret) return ret; memset(&event, 0, sizeof event); event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; return ucma_event_handler(ctx->cm_id, &event); } static int ucma_set_option_ib(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret; switch (optname) { case RDMA_OPTION_IB_PATH: ret = ucma_set_ib_path(ctx, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { int ret; switch (level) { case RDMA_OPTION_ID: ret = ucma_set_option_id(ctx, optname, optval, optlen); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_set_option cmd; struct ucma_context *ctx; void *optval; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); optval = 
memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen); if (IS_ERR(optval)) { ret = PTR_ERR(optval); goto out; } ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, cmd.optlen); kfree(optval); out: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_notify cmd; struct ucma_context *ctx; int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_process_join(struct ucma_file *file, struct rdma_ucm_join_mcast *cmd, int out_len) { struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct ucma_multicast *mc; struct sockaddr *addr; int ret; u8 join_state; if (out_len < sizeof(resp)) return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; if (cmd->addr_size != rdma_addr_size(addr)) return -EINVAL; if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) join_state = BIT(FULLMEMBER_JOIN); else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER) join_state = BIT(SENDONLY_FULLMEMBER_JOIN); else return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&file->mut); mc = ucma_alloc_multicast(ctx); if (!mc) { ret = -ENOMEM; goto err1; } mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); if (ret) goto err2; resp.id = mc->id; if (copy_to_user(u64_to_user_ptr(cmd->response), &resp, sizeof(resp))) { ret = -EFAULT; goto err3; } mutex_lock(&mut); idr_replace(&multicast_idr, mc, mc->id); mutex_unlock(&mut); mutex_unlock(&file->mut); ucma_put_ctx(ctx); return 0; err3: rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); ucma_cleanup_mc_events(mc); 
err2: mutex_lock(&mut); idr_remove(&multicast_idr, mc->id); mutex_unlock(&mut); list_del(&mc->list); kfree(mc); err1: mutex_unlock(&file->mut); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_join_ip_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_ip_mcast cmd; struct rdma_ucm_join_mcast join_cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); if (!join_cmd.addr_size) return -EINVAL; join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); return ucma_process_join(file, &join_cmd, out_len); } static ssize_t ucma_join_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_mcast cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_kss(&cmd.addr)) return -EINVAL; return ucma_process_join(file, &cmd, out_len); } static ssize_t ucma_leave_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_multicast *mc; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&mut); mc = idr_find(&multicast_idr, cmd.id); if (!mc) mc = ERR_PTR(-ENOENT); else if (mc->ctx->file != file) mc = ERR_PTR(-EINVAL); else if (!atomic_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); else idr_remove(&multicast_idr, mc->id); mutex_unlock(&mut); if (IS_ERR(mc)) { ret = PTR_ERR(mc); goto out; } rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); mutex_unlock(&mc->ctx->file->mut); ucma_put_ctx(mc->ctx); resp.events_reported = mc->events_reported; kfree(mc); if 
(copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: return ret; } static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) { /* Acquire mutex's based on pointer comparison to prevent deadlock. */ if (file1 < file2) { mutex_lock(&file1->mut); mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); } else { mutex_lock(&file2->mut); mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); } } static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2) { if (file1 < file2) { mutex_unlock(&file2->mut); mutex_unlock(&file1->mut); } else { mutex_unlock(&file1->mut); mutex_unlock(&file2->mut); } } static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) { struct ucma_event *uevent, *tmp; list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) if (uevent->ctx == ctx) list_move_tail(&uevent->list, &file->event_list); } static ssize_t ucma_migrate_id(struct ucma_file *new_file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_migrate_id cmd; struct rdma_ucm_migrate_resp resp; struct ucma_context *ctx; struct fd f; struct ucma_file *cur_file; int ret = 0; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; /* Get current fd to protect against it being closed */ f = fdget(cmd.fd); if (!f.file) return -ENOENT; /* Validate current fd and prevent destruction of id. */ ctx = ucma_get_ctx(f.file->private_data, cmd.id); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto file_put; } cur_file = ctx->file; if (cur_file == new_file) { resp.events_reported = ctx->events_reported; goto response; } /* * Migrate events between fd's, maintaining order, and avoiding new * events being added before existing events. 
*/ ucma_lock_files(cur_file, new_file); mutex_lock(&mut); list_move_tail(&ctx->list, &new_file->ctx_list); ucma_move_events(ctx, new_file); ctx->file = new_file; resp.events_reported = ctx->events_reported; mutex_unlock(&mut); ucma_unlock_files(cur_file, new_file); response: if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; ucma_put_ctx(ctx); file_put: fdput(f); return ret; } static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) = { [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, [RDMA_USER_CM_CMD_REJECT] = ucma_reject, [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, [RDMA_USER_CM_CMD_GET_OPTION] = NULL, [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, [RDMA_USER_CM_CMD_QUERY] = ucma_query, [RDMA_USER_CM_CMD_BIND] = ucma_bind, [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast }; static ssize_t ucma_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct ucma_file *file = filp->private_data; struct rdma_ucm_cmd_hdr hdr; ssize_t ret; if (!ib_safe_file_access(filp)) { pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", task_tgid_vnr(current), current->comm); return 
-EACCES; } if (len < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) return -EINVAL; if (hdr.in + sizeof(hdr) > len) return -EINVAL; if (!ucma_cmd_table[hdr.cmd]) return -ENOSYS; ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); if (!ret) ret = len; return ret; } static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait) { struct ucma_file *file = filp->private_data; __poll_t mask = 0; poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->event_list)) mask = EPOLLIN | EPOLLRDNORM; return mask; } /* * ucma_open() does not need the BKL: * * - no global state is referred to; * - there is no ioctl method to race against; * - no further module initialization is required for open to work * after the device is registered. */ static int ucma_open(struct inode *inode, struct file *filp) { struct ucma_file *file; file = kmalloc(sizeof *file, GFP_KERNEL); if (!file) return -ENOMEM; file->close_wq = alloc_ordered_workqueue("ucma_close_id", WQ_MEM_RECLAIM); if (!file->close_wq) { kfree(file); return -ENOMEM; } INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); filp->private_data = file; file->filp = filp; return nonseekable_open(inode, filp); } static int ucma_close(struct inode *inode, struct file *filp) { struct ucma_file *file = filp->private_data; struct ucma_context *ctx, *tmp; mutex_lock(&file->mut); list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { ctx->destroying = 1; mutex_unlock(&file->mut); mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); flush_workqueue(file->close_wq); /* At that step once ctx was marked as destroying and workqueue * was flushed we are safe from any inflights handlers that * might put other closing task. 
*/ mutex_lock(&mut); if (!ctx->closing) { mutex_unlock(&mut); /* rdma_destroy_id ensures that no event handlers are * inflight for that id before releasing it. */ rdma_destroy_id(ctx->cm_id); } else { mutex_unlock(&mut); } ucma_free_ctx(ctx); mutex_lock(&file->mut); } mutex_unlock(&file->mut); destroy_workqueue(file->close_wq); kfree(file); return 0; } static const struct file_operations ucma_fops = { .owner = THIS_MODULE, .open = ucma_open, .release = ucma_close, .write = ucma_write, .poll = ucma_poll, .llseek = no_llseek, }; static struct miscdevice ucma_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "rdma_cm", .nodename = "infiniband/rdma_cm", .mode = 0666, .fops = &ucma_fops, }; static ssize_t show_abi_version(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); } static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); static int __init ucma_init(void) { int ret; ret = misc_register(&ucma_misc); if (ret) return ret; ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); if (ret) { pr_err("rdma_ucm: couldn't create abi_version attr\n"); goto err1; } ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); if (!ucma_ctl_table_hdr) { pr_err("rdma_ucm: couldn't register sysctl paths\n"); ret = -ENOMEM; goto err2; } return 0; err2: device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); err1: misc_deregister(&ucma_misc); return ret; } static void __exit ucma_cleanup(void) { unregister_net_sysctl_table(ucma_ctl_table_hdr); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); idr_destroy(&ctx_idr); idr_destroy(&multicast_idr); } module_init(ucma_init); module_exit(ucma_cleanup);
./CrossVul/dataset_final_sorted/CWE-416/c/good_282_0
crossvul-cpp_data_bad_2440_0
/* * core.c -- Voltage/Current Regulator framework. * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * Copyright 2008 SlimLogic Ltd. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/async.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/regmap.h> #include <linux/regulator/of_regulator.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/module.h> #define CREATE_TRACE_POINTS #include <trace/events/regulator.h> #include "dummy.h" #include "internal.h" #define rdev_crit(rdev, fmt, ...) \ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_err(rdev, fmt, ...) \ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_warn(rdev, fmt, ...) \ pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_info(rdev, fmt, ...) \ pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_dbg(rdev, fmt, ...) \ pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) static DEFINE_MUTEX(regulator_list_mutex); static LIST_HEAD(regulator_list); static LIST_HEAD(regulator_map_list); static LIST_HEAD(regulator_ena_gpio_list); static LIST_HEAD(regulator_supply_alias_list); static bool has_full_constraints; static struct dentry *debugfs_root; /* * struct regulator_map * * Used to provide symbolic supply names to devices. 
*/
/* Maps a (consumer dev_name, supply name) pair onto a producing regulator. */
struct regulator_map {
	struct list_head list;
	const char *dev_name;	/* The dev_name() for the consumer */
	const char *supply;
	struct regulator_dev *regulator;
};

/*
 * struct regulator_enable_gpio
 *
 * Management for shared enable GPIO pin
 */
struct regulator_enable_gpio {
	struct list_head list;
	struct gpio_desc *gpiod;
	u32 enable_count;	/* a number of enabled shared GPIO */
	u32 request_count;	/* a number of requested shared GPIO */
	unsigned int ena_gpio_invert:1;
};

/*
 * struct regulator_supply_alias
 *
 * Used to map lookups for a supply onto an alternative device.
 */
struct regulator_supply_alias {
	struct list_head list;
	struct device *src_dev;
	const char *src_supply;
	struct device *alias_dev;
	const char *alias_supply;
};

static int _regulator_is_enabled(struct regulator_dev *rdev);
static int _regulator_disable(struct regulator_dev *rdev);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
static int _notifier_call_chain(struct regulator_dev *rdev,
				  unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
				     int min_uV, int max_uV);
static struct regulator *create_regulator(struct regulator_dev *rdev,
					  struct device *dev,
					  const char *supply_name);

/*
 * Human-readable name for an rdev: constraint name first, then the
 * descriptor name, then the empty string as a last resort.
 */
static const char *rdev_get_name(struct regulator_dev *rdev)
{
	if (rdev->constraints && rdev->constraints->name)
		return rdev->constraints->name;
	else if (rdev->desc->name)
		return rdev->desc->name;
	else
		return "";
}

/* True if all regulators are described (board data or populated DT). */
static bool have_full_constraints(void)
{
	return has_full_constraints || of_have_populated_dt();
}

/**
 * of_get_regulator - get a regulator device node based on supply name
 * @dev: Device pointer for the consumer (of regulator) device
 * @supply: regulator supply name
 *
 * Extract the regulator device node corresponding to the supply name.
 * returns the device node corresponding to the regulator if found, else
 * returns NULL.
 */
static struct device_node *of_get_regulator(struct device *dev,
					    const char *supply)
{
	struct device_node *regnode = NULL;
	char prop_name[32]; /* 32 is max size of property name */

	dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);

	/* DT convention: the phandle lives in a "<supply>-supply" property */
	snprintf(prop_name, 32, "%s-supply", supply);
	regnode = of_parse_phandle(dev->of_node, prop_name, 0);

	if (!regnode) {
		dev_dbg(dev, "Looking up %s property in node %s failed",
				prop_name, dev->of_node->full_name);
		return NULL;
	}
	return regnode;
}

/* Non-zero when machine constraints permit enable/disable of this rdev. */
static int _regulator_can_change_status(struct regulator_dev *rdev)
{
	if (!rdev->constraints)
		return 0;

	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
		return 1;
	else
		return 0;
}

/* Platform voltage constraint check; clamps *min_uV/*max_uV in place. */
static int regulator_check_voltage(struct regulator_dev *rdev,
				   int *min_uV, int *max_uV)
{
	BUG_ON(*min_uV > *max_uV);

	if (!rdev->constraints) {
		rdev_err(rdev, "no constraints\n");
		return -ENODEV;
	}
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		rdev_err(rdev, "operation not allowed\n");
		return -EPERM;
	}

	/* clamp the requested window to the machine constraints */
	if (*max_uV > rdev->constraints->max_uV)
		*max_uV = rdev->constraints->max_uV;
	if (*min_uV < rdev->constraints->min_uV)
		*min_uV = rdev->constraints->min_uV;

	if (*min_uV > *max_uV) {
		rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
			 *min_uV, *max_uV);
		return -EINVAL;
	}

	return 0;
}

/* Make sure we select a voltage that suits the needs of all
 * regulator consumers */
static int regulator_check_consumers(struct regulator_dev *rdev,
				     int *min_uV, int *max_uV)
{
	struct regulator *regulator;

	list_for_each_entry(regulator, &rdev->consumer_list, list) {
		/*
		 * Assume consumers that didn't say anything are OK
		 * with anything in the constraint range.
> 0)
		return sprintf(buf, "enabled\n");
	else if (state == 0)
		return sprintf(buf, "disabled\n");
	else
		return sprintf(buf, "unknown\n");
}

/* sysfs: "state" — enabled/disabled/unknown, under the rdev mutex. */
static ssize_t regulator_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	ssize_t ret;

	mutex_lock(&rdev->mutex);
	ret = regulator_print_state(buf, _regulator_is_enabled(rdev));
	mutex_unlock(&rdev->mutex);

	return ret;
}
static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);

/* sysfs: "status" — driver-reported hardware status via ops->get_status. */
static ssize_t regulator_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	int status;
	char *label;

	status = rdev->desc->ops->get_status(rdev);
	if (status < 0)
		return status;

	switch (status) {
	case REGULATOR_STATUS_OFF:
		label = "off";
		break;
	case REGULATOR_STATUS_ON:
		label = "on";
		break;
	case REGULATOR_STATUS_ERROR:
		label = "error";
		break;
	case REGULATOR_STATUS_FAST:
		label = "fast";
		break;
	case REGULATOR_STATUS_NORMAL:
		label = "normal";
		break;
	case REGULATOR_STATUS_IDLE:
		label = "idle";
		break;
	case REGULATOR_STATUS_STANDBY:
		label = "standby";
		break;
	case REGULATOR_STATUS_BYPASS:
		label = "bypass";
		break;
	case REGULATOR_STATUS_UNDEFINED:
		label = "undefined";
		break;
	default:
		return -ERANGE;
	}

	return sprintf(buf, "%s\n", label);
}
static DEVICE_ATTR(status, 0444, regulator_status_show, NULL);

/* sysfs: machine-constraint minimum current, if constraints exist. */
static ssize_t regulator_min_uA_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->min_uA);
}
static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);

/* sysfs: machine-constraint maximum current, if constraints exist. */
static ssize_t regulator_max_uA_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->max_uA);
}
static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);

/* sysfs: machine-constraint minimum voltage, if constraints exist. */
static ssize_t regulator_min_uV_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->min_uV);
}
static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);

/* sysfs: machine-constraint maximum voltage, if constraints exist. */
static ssize_t regulator_max_uV_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	if (!rdev->constraints)
		return sprintf(buf, "constraint not defined\n");

	return sprintf(buf, "%d\n", rdev->constraints->max_uV);
}
static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);

/* sysfs: sum of all consumers' requested load, under the rdev mutex. */
static ssize_t regulator_total_uA_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	struct regulator *regulator;
	int uA = 0;

	mutex_lock(&rdev->mutex);
	list_for_each_entry(regulator, &rdev->consumer_list, list)
		uA += regulator->uA_load;
	mutex_unlock(&rdev->mutex);
	return sprintf(buf, "%d\n", uA);
}
static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);

/* sysfs: current enable reference count of this regulator. */
static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", rdev->use_count);
}
static DEVICE_ATTR_RO(num_users);

/* sysfs: regulator type (voltage or current source). */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	switch (rdev->desc->type) {
	case REGULATOR_VOLTAGE:
		return sprintf(buf, "voltage\n");
	case REGULATOR_CURRENT:
		return sprintf(buf, "current\n");
	}
	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(type);

/* sysfs: suspend-to-mem target voltage from machine constraints. */
static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
}
static DEVICE_ATTR(suspend_mem_microvolts, 0444,
		regulator_suspend_mem_uV_show, NULL);

/* sysfs: suspend-to-disk target voltage from machine constraints. */
static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
}
static DEVICE_ATTR(suspend_disk_microvolts, 0444,
		regulator_suspend_disk_uV_show, NULL);

/* sysfs: standby target voltage from machine constraints. */
static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
}
static DEVICE_ATTR(suspend_standby_microvolts, 0444,
		regulator_suspend_standby_uV_show, NULL);

/* sysfs: operating mode to use during suspend-to-mem. */
static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_mem.mode);
}
static DEVICE_ATTR(suspend_mem_mode, 0444,
		regulator_suspend_mem_mode_show, NULL);

/* sysfs: operating mode to use during suspend-to-disk. */
static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_disk.mode);
}
static DEVICE_ATTR(suspend_disk_mode, 0444,
		regulator_suspend_disk_mode_show, NULL);

/* sysfs: operating mode to use during standby. */
static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_opmode(buf,
		rdev->constraints->state_standby.mode);
}
static DEVICE_ATTR(suspend_standby_mode, 0444,
		regulator_suspend_standby_mode_show, NULL);

/* sysfs: whether the regulator stays enabled in suspend-to-mem. */
static ssize_t regulator_suspend_mem_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_mem.enabled);
}
static DEVICE_ATTR(suspend_mem_state, 0444,
		regulator_suspend_mem_state_show, NULL);

/* sysfs: whether the regulator stays enabled in suspend-to-disk. */
static ssize_t regulator_suspend_disk_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_disk.enabled);
}
static DEVICE_ATTR(suspend_disk_state, 0444,
		regulator_suspend_disk_state_show, NULL);

/* sysfs: whether the regulator stays enabled in standby. */
static ssize_t regulator_suspend_standby_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);

	return regulator_print_state(buf,
			rdev->constraints->state_standby.enabled);
}
static DEVICE_ATTR(suspend_standby_state, 0444,
		regulator_suspend_standby_state_show, NULL);

/* sysfs: bypass state via ops->get_bypass (unknown if the query fails). */
static ssize_t regulator_bypass_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	const char *report;
	bool bypass;
	int ret;

	ret = rdev->desc->ops->get_bypass(rdev, &bypass);

	if (ret != 0)
		report = "unknown";
	else if (bypass)
		report = "enabled";
	else
		report = "disabled";

	return sprintf(buf, "%s\n", report);
}
static DEVICE_ATTR(bypass, 0444,
		   regulator_bypass_show, NULL);

/*
 * These are the only attributes are present for all regulators.
 * Other attributes are a function of regulator functionality.
 */
static struct attribute *regulator_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_num_users.attr,
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(regulator_dev);

/* Device release callback: frees the rdev allocated at registration. */
static void regulator_dev_release(struct device *dev)
{
	struct regulator_dev *rdev = dev_get_drvdata(dev);
	kfree(rdev);
}

static struct class regulator_class = {
	.name = "regulator",
	.dev_release = regulator_dev_release,
	.dev_groups = regulator_dev_groups,
};

/* Calculate the new optimum regulator operating mode based on the new total
 * consumer load.
All locks held by caller */
static void drms_uA_update(struct regulator_dev *rdev)
{
	struct regulator *sibling;
	int current_uA = 0, output_uV, input_uV, err;
	unsigned int mode;

	/* bail out unless DRMS is permitted and the driver supports it */
	err = regulator_check_drms(rdev);
	if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
	    (!rdev->desc->ops->get_voltage &&
	     !rdev->desc->ops->get_voltage_sel) ||
	    !rdev->desc->ops->set_mode)
		return;

	/* get output voltage */
	output_uV = _regulator_get_voltage(rdev);
	if (output_uV <= 0)
		return;

	/* get input voltage */
	input_uV = 0;
	if (rdev->supply)
		input_uV = regulator_get_voltage(rdev->supply);
	if (input_uV <= 0)
		input_uV = rdev->constraints->input_uV;
	if (input_uV <= 0)
		return;

	/* calc total requested load */
	list_for_each_entry(sibling, &rdev->consumer_list, list)
		current_uA += sibling->uA_load;

	/* now get the optimum mode for our new total regulator load */
	mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
						 output_uV, current_uA);

	/* check the new mode is allowed */
	err = regulator_mode_constrain(rdev, &mode);
	if (err == 0)
		rdev->desc->ops->set_mode(rdev, mode);
}

/* Apply one suspend-state description (enable/disable, voltage, mode). */
static int suspend_set_state(struct regulator_dev *rdev,
	struct regulator_state *rstate)
{
	int ret = 0;

	/* If we have no suspend mode configuration don't set anything;
	 * only warn if the driver implements set_suspend_voltage or
	 * set_suspend_mode callback.
	 */
	if (!rstate->enabled && !rstate->disabled) {
		if (rdev->desc->ops->set_suspend_voltage ||
		    rdev->desc->ops->set_suspend_mode)
			rdev_warn(rdev, "No configuration\n");
		return 0;
	}

	/* enabled and disabled are mutually exclusive */
	if (rstate->enabled && rstate->disabled) {
		rdev_err(rdev, "invalid configuration\n");
		return -EINVAL;
	}

	if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
		ret = rdev->desc->ops->set_suspend_enable(rdev);
	else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
		ret = rdev->desc->ops->set_suspend_disable(rdev);
	else /* OK if set_suspend_enable or set_suspend_disable is NULL */
		ret = 0;

	if (ret < 0) {
		rdev_err(rdev, "failed to enabled/disable\n");
		return ret;
	}

	if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
		ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
		if (ret < 0) {
			rdev_err(rdev, "failed to set voltage\n");
			return ret;
		}
	}

	if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
		ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
		if (ret < 0) {
			rdev_err(rdev, "failed to set mode\n");
			return ret;
		}
	}
	return ret;
}

/* locks held by caller */
static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
{
	if (!rdev->constraints)
		return -EINVAL;

	/* pick the constraint state description matching the PM target */
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return suspend_set_state(rdev,
			&rdev->constraints->state_standby);
	case PM_SUSPEND_MEM:
		return suspend_set_state(rdev,
			&rdev->constraints->state_mem);
	case PM_SUSPEND_MAX:
		return suspend_set_state(rdev,
			&rdev->constraints->state_disk);
	default:
		return -EINVAL;
	}
}

/* Log a one-line human readable summary of the machine constraints. */
static void print_constraints(struct regulator_dev *rdev)
{
	struct regulation_constraints *constraints = rdev->constraints;
	char buf[80] = "";
	int count = 0;
	int ret;

	if (constraints->min_uV && constraints->max_uV) {
		if (constraints->min_uV == constraints->max_uV)
			count += sprintf(buf + count, "%d mV ",
					 constraints->min_uV / 1000);
		else
			count += sprintf(buf + count, "%d <--> %d mV ",
					 constraints->min_uV / 1000,
					 constraints->max_uV / 1000);
	}

	if (!constraints->min_uV ||
constraints->min_uV != constraints->max_uV) { ret = _regulator_get_voltage(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mV ", ret / 1000); } if (constraints->uV_offset) count += sprintf(buf, "%dmV offset ", constraints->uV_offset / 1000); if (constraints->min_uA && constraints->max_uA) { if (constraints->min_uA == constraints->max_uA) count += sprintf(buf + count, "%d mA ", constraints->min_uA / 1000); else count += sprintf(buf + count, "%d <--> %d mA ", constraints->min_uA / 1000, constraints->max_uA / 1000); } if (!constraints->min_uA || constraints->min_uA != constraints->max_uA) { ret = _regulator_get_current_limit(rdev); if (ret > 0) count += sprintf(buf + count, "at %d mA ", ret / 1000); } if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) count += sprintf(buf + count, "fast "); if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) count += sprintf(buf + count, "normal "); if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE) count += sprintf(buf + count, "idle "); if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += sprintf(buf + count, "standby"); if (!count) sprintf(buf, "no parameters"); rdev_dbg(rdev, "%s\n", buf); if ((constraints->min_uV != constraints->max_uV) && !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) rdev_warn(rdev, "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n"); } static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { const struct regulator_ops *ops = rdev->desc->ops; int ret; /* do we need to apply the constraint voltage */ if (rdev->constraints->apply_uV && rdev->constraints->min_uV == rdev->constraints->max_uV) { int current_uV = _regulator_get_voltage(rdev); if (current_uV < 0) { rdev_err(rdev, "failed to get the current voltage(%d)\n", current_uV); return current_uV; } if (current_uV < rdev->constraints->min_uV || current_uV > rdev->constraints->max_uV) { ret = _regulator_do_set_voltage( rdev, rdev->constraints->min_uV, 
rdev->constraints->max_uV);
			if (ret < 0) {
				rdev_err(rdev,
					"failed to apply %duV constraint(%d)\n",
					rdev->constraints->min_uV, ret);
				return ret;
			}
		}
	}

	/* constrain machine-level voltage specs to fit
	 * the actual range supported by this regulator.
	 */
	if (ops->list_voltage && rdev->desc->n_voltages) {
		int count = rdev->desc->n_voltages;
		int i;
		int min_uV = INT_MAX;
		int max_uV = INT_MIN;
		int cmin = constraints->min_uV;
		int cmax = constraints->max_uV;

		/* it's safe to autoconfigure fixed-voltage supplies
		   and the constraints are used by list_voltage. */
		if (count == 1 && !cmin) {
			cmin = 1;
			cmax = INT_MAX;
			constraints->min_uV = cmin;
			constraints->max_uV = cmax;
		}

		/* voltage constraints are optional */
		if ((cmin == 0) && (cmax == 0))
			return 0;

		/* else require explicit machine-level constraints */
		if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
			rdev_err(rdev, "invalid voltage constraints\n");
			return -EINVAL;
		}

		/* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
		for (i = 0; i < count; i++) {
			int value;

			value = ops->list_voltage(rdev, i);
			if (value <= 0)
				continue;

			/* maybe adjust [min_uV..max_uV] */
			if (value >= cmin && value < min_uV)
				min_uV = value;
			if (value <= cmax && value > max_uV)
				max_uV = value;
		}

		/* final: [min_uV..max_uV] valid iff constraints valid */
		if (max_uV < min_uV) {
			rdev_err(rdev,
				 "unsupportable voltage constraints %u-%uuV\n",
				 min_uV, max_uV);
			return -EINVAL;
		}

		/* use regulator's subset of machine constraints */
		if (constraints->min_uV < min_uV) {
			rdev_dbg(rdev, "override min_uV, %d -> %d\n",
				 constraints->min_uV, min_uV);
			constraints->min_uV = min_uV;
		}
		if (constraints->max_uV > max_uV) {
			rdev_dbg(rdev, "override max_uV, %d -> %d\n",
				 constraints->max_uV, max_uV);
			constraints->max_uV = max_uV;
		}
	}

	return 0;
}

/* Validate the machine current constraints and program them if possible. */
static int machine_constraints_current(struct regulator_dev *rdev,
	struct regulation_constraints *constraints)
{
	const struct regulator_ops *ops = rdev->desc->ops;
	int ret;

	/* current constraints are optional */
	if (!constraints->min_uA && !constraints->max_uA)
		return 0;

	if (constraints->min_uA > constraints->max_uA) {
		rdev_err(rdev, "Invalid current constraints\n");
		return -EINVAL;
	}

	if (!ops->set_current_limit || !ops->get_current_limit) {
		rdev_warn(rdev, "Operation of current configuration missing\n");
		return 0;
	}

	/* Set regulator current in constraints range */
	ret = ops->set_current_limit(rdev, constraints->min_uA,
			constraints->max_uA);
	if (ret < 0) {
		rdev_err(rdev, "Failed to set current constraint, %d\n", ret);
		return ret;
	}

	return 0;
}

static int _regulator_do_enable(struct regulator_dev *rdev);

/**
 * set_machine_constraints - sets regulator constraints
 * @rdev: regulator source
 * @constraints: constraints to apply
 *
 * Allows platform initialisation code to define and constrain
 * regulator circuits e.g. valid voltage/current ranges, etc.  NOTE:
 * Constraints *must* be set by platform code in order for some
 * regulator operations to proceed i.e. set_voltage, set_current_limit,
 * set_mode.
 */
static int set_machine_constraints(struct regulator_dev *rdev,
	const struct regulation_constraints *constraints)
{
	int ret = 0;
	const struct regulator_ops *ops = rdev->desc->ops;

	/* rdev owns its own copy of the constraints (may be empty) */
	if (constraints)
		rdev->constraints = kmemdup(constraints, sizeof(*constraints),
					    GFP_KERNEL);
	else
		rdev->constraints = kzalloc(sizeof(*constraints),
					    GFP_KERNEL);
	if (!rdev->constraints)
		return -ENOMEM;

	ret = machine_constraints_voltage(rdev, rdev->constraints);
	if (ret != 0)
		goto out;

	ret = machine_constraints_current(rdev, rdev->constraints);
	if (ret != 0)
		goto out;

	/* do we need to setup our suspend state */
	if (rdev->constraints->initial_state) {
		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
		if (ret < 0) {
			rdev_err(rdev, "failed to set suspend state\n");
			goto out;
		}
	}

	if (rdev->constraints->initial_mode) {
		if (!ops->set_mode) {
			rdev_err(rdev, "no set_mode operation\n");
			ret = -EINVAL;
			goto out;
		}

		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
		if (ret < 0) {
			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
			goto out;
		}
	}

	/* If the constraints say the regulator should be on at this point
	 * and we have control then make sure it is enabled.
	 */
	if (rdev->constraints->always_on || rdev->constraints->boot_on) {
		ret = _regulator_do_enable(rdev);
		if (ret < 0 && ret != -EINVAL) {
			rdev_err(rdev, "failed to enable\n");
			goto out;
		}
	}

	if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
		&& ops->set_ramp_delay) {
		ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
		if (ret < 0) {
			rdev_err(rdev, "failed to set ramp_delay\n");
			goto out;
		}
	}

	print_constraints(rdev);
	return 0;
out:
	kfree(rdev->constraints);
	rdev->constraints = NULL;
	return ret;
}

/**
 * set_supply - set regulator supply regulator
 * @rdev: regulator name
 * @supply_rdev: supply regulator name
 *
 * Called by platform initialisation code to set the supply regulator for this
 * regulator. This ensures that a regulators supply will also be enabled by the
 * core if it's child is enabled.
 */
static int set_supply(struct regulator_dev *rdev,
		      struct regulator_dev *supply_rdev)
{
	int err;

	rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));

	/* the supply is consumed through an internal "SUPPLY" consumer */
	rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
	if (rdev->supply == NULL) {
		err = -ENOMEM;
		return err;
	}
	supply_rdev->open_count++;

	return 0;
}

/**
 * set_consumer_device_supply - Bind a regulator to a symbolic supply
 * @rdev: regulator source
 * @consumer_dev_name: dev_name() string for device supply applies to
 * @supply: symbolic name for supply
 *
 * Allows platform initialisation code to map physical regulator
 * sources to symbolic names for supplies for use by devices.  Devices
 * should use these symbolic names to request regulators, avoiding the
 * need to provide board-specific regulator names as platform data.
 */
static int set_consumer_device_supply(struct regulator_dev *rdev,
				      const char *consumer_dev_name,
				      const char *supply)
{
	struct regulator_map *node;
	int has_dev;

	if (supply == NULL)
		return -EINVAL;

	if (consumer_dev_name != NULL)
		has_dev = 1;
	else
		has_dev = 0;

	/* refuse duplicate (dev_name, supply) mappings */
	list_for_each_entry(node, &regulator_map_list, list) {
		if (node->dev_name && consumer_dev_name) {
			if (strcmp(node->dev_name, consumer_dev_name) != 0)
				continue;
		} else if (node->dev_name || consumer_dev_name) {
			continue;
		}

		if (strcmp(node->supply, supply) != 0)
			continue;

		pr_debug("%s: %s/%s is '%s' supply; fail %s/%s\n",
			 consumer_dev_name,
			 dev_name(&node->regulator->dev),
			 node->regulator->desc->name,
			 supply,
			 dev_name(&rdev->dev), rdev_get_name(rdev));
		return -EBUSY;
	}

	node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
	if (node == NULL)
		return -ENOMEM;

	node->regulator = rdev;
	node->supply = supply;

	if (has_dev) {
		node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
		if (node->dev_name == NULL) {
			kfree(node);
			return -ENOMEM;
		}
	}

	list_add(&node->list, &regulator_map_list);
	return 0;
}

/* Drop every consumer mapping that points at rdev (used on unregister). */
static void unset_regulator_supplies(struct regulator_dev *rdev)
{
	struct regulator_map *node, *n;

	list_for_each_entry_safe(node, n, &regulator_map_list, list) {
		if (rdev == node->regulator) {
			list_del(&node->list);
			kfree(node->dev_name);
			kfree(node);
		}
	}
}

#define REG_STR_SIZE	64

/*
 * Allocate a consumer handle on rdev, wire up its sysfs link and
 * debugfs entries; returns NULL on any allocation/overflow failure.
 */
static struct regulator *create_regulator(struct regulator_dev *rdev,
					  struct device *dev,
					  const char *supply_name)
{
	struct regulator *regulator;
	char buf[REG_STR_SIZE];
	int err, size;

	regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
	if (regulator == NULL)
		return NULL;

	mutex_lock(&rdev->mutex);
	regulator->rdev = rdev;
	list_add(&regulator->list, &rdev->consumer_list);

	if (dev) {
		regulator->dev = dev;

		/* Add a link to the device sysfs entry */
		size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
				 dev->kobj.name, supply_name);
		if (size >= REG_STR_SIZE)
			goto overflow_err;

		regulator->supply_name = kstrdup(buf, GFP_KERNEL);
		if (regulator->supply_name == NULL)
			goto overflow_err;

		err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
					buf);
		if (err) {
			rdev_warn(rdev, "could not add device link %s err %d\n",
				  dev->kobj.name, err);
			/* non-fatal */
		}
	} else {
		regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
		if (regulator->supply_name == NULL)
			goto overflow_err;
	}

	regulator->debugfs = debugfs_create_dir(regulator->supply_name,
						rdev->debugfs);
	if (!regulator->debugfs) {
		rdev_warn(rdev, "Failed to create debugfs directory\n");
	} else {
		debugfs_create_u32("uA_load", 0444, regulator->debugfs,
				   &regulator->uA_load);
		debugfs_create_u32("min_uV", 0444, regulator->debugfs,
				   &regulator->min_uV);
		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
				   &regulator->max_uV);
	}

	/*
	 * Check now if the regulator is an always on regulator - if
	 * it is then we don't need to do nearly so much work for
	 * enable/disable calls.
	 */
	if (!_regulator_can_change_status(rdev) &&
	    _regulator_is_enabled(rdev))
		regulator->always_on = true;

	mutex_unlock(&rdev->mutex);
	return regulator;
overflow_err:
	list_del(&regulator->list);
	kfree(regulator);
	mutex_unlock(&rdev->mutex);
	return NULL;
}

/* Enable ramp time in us: constraints override, then driver callback. */
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
	if (rdev->constraints && rdev->constraints->enable_time)
		return rdev->constraints->enable_time;
	if (!rdev->desc->ops->enable_time)
		return rdev->desc->enable_time;
	return rdev->desc->ops->enable_time(rdev);
}

/* Find a registered alias entry for (dev, supply), or NULL. */
static struct regulator_supply_alias *regulator_find_supply_alias(
		struct device *dev, const char *supply)
{
	struct regulator_supply_alias *map;

	list_for_each_entry(map, &regulator_supply_alias_list, list)
		if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0)
			return map;

	return NULL;
}

/* Redirect (*dev, *supply) through a registered alias, if one exists. */
static void regulator_supply_alias(struct device **dev, const char **supply)
{
	struct regulator_supply_alias *map;

	map = regulator_find_supply_alias(*dev, *supply);
	if (map) {
		dev_dbg(*dev, "Mapping supply %s to %s,%s\n",
				*supply, map->alias_supply,
				dev_name(map->alias_dev));
		*dev = map->alias_dev;
		*supply = map->alias_supply;
	}
}

/*
 * Resolve a consumer (dev, supply) to a registered rdev: DT lookup
 * first, then name match, then the board supply map.  *ret carries a
 * refined error code (e.g. -EPROBE_DEFER) when NULL is returned.
 */
static struct regulator_dev *regulator_dev_lookup(struct device *dev,
						  const char *supply,
						  int *ret)
{
	struct regulator_dev *r;
	struct device_node *node;
	struct regulator_map *map;
	const char *devname = NULL;

	regulator_supply_alias(&dev, &supply);

	/* first do a dt based lookup */
	if (dev && dev->of_node) {
		node = of_get_regulator(dev, supply);
		if (node) {
			list_for_each_entry(r, &regulator_list, list)
				if (r->dev.parent &&
					node == r->dev.of_node)
					return r;
			*ret = -EPROBE_DEFER;
			return NULL;
		} else {
			/*
			 * If we couldn't even get the node then it's
			 * not just that the device didn't register
			 * yet, there's no node and we'll never
			 * succeed.
			 */
			*ret = -ENODEV;
		}
	}

	/* if not found, try doing it non-dt way */
	if (dev)
		devname = dev_name(dev);

	list_for_each_entry(r, &regulator_list, list)
		if (strcmp(rdev_get_name(r), supply) == 0)
			return r;

	list_for_each_entry(map, &regulator_map_list, list) {
		/* If the mapping has a device set up it must match */
		if (map->dev_name &&
		    (!devname || strcmp(map->dev_name, devname)))
			continue;

		if (strcmp(map->supply, supply) == 0)
			return map->regulator;
	}

	return NULL;
}

/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
					bool exclusive, bool allow_dummy)
{
	struct regulator_dev *rdev;
	struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
	const char *devname = NULL;
	int ret;

	if (id == NULL) {
		pr_err("get() with no identifier\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev)
		devname = dev_name(dev);

	/* default error when the lookup comes back empty */
	if (have_full_constraints())
		ret = -ENODEV;
	else
		ret = -EPROBE_DEFER;

	mutex_lock(&regulator_list_mutex);

	rdev = regulator_dev_lookup(dev, id, &ret);
	if (rdev)
		goto found;

	regulator = ERR_PTR(ret);

	/*
	 * If we have return value from dev_lookup fail, we do not expect to
	 * succeed, so, quit with appropriate error value
	 */
	if (ret && ret != -ENODEV)
		goto out;

	if (!devname)
		devname = "deviceless";

	/*
	 * Assume that a regulator is physically present and enabled
	 * even if it isn't hooked up and just provide a dummy.
	 */
	if (have_full_constraints() && allow_dummy) {
		pr_warn("%s supply %s not found, using dummy regulator\n",
			devname, id);

		rdev = dummy_regulator_rdev;
		goto found;
	/* Don't log an error when called from regulator_get_optional() */
	} else if (!have_full_constraints() || exclusive) {
		/* NOTE(review): dev may be NULL on this path ("deviceless"
		 * consumers) — confirm dev_warn(NULL, ...) is safe here. */
		dev_warn(dev, "dummy supplies not allowed\n");
	}

	mutex_unlock(&regulator_list_mutex);
	return regulator;

found:
	if (rdev->exclusive) {
		regulator = ERR_PTR(-EPERM);
		goto out;
	}

	if (exclusive && rdev->open_count) {
		regulator = ERR_PTR(-EBUSY);
		goto out;
	}

	if (!try_module_get(rdev->owner))
		goto out;

	regulator = create_regulator(rdev, dev, id);
	if (regulator == NULL) {
		regulator = ERR_PTR(-ENOMEM);
		module_put(rdev->owner);
		goto out;
	}

	rdev->open_count++;
	if (exclusive) {
		rdev->exclusive = 1;

		/* mirror the hardware state into the use count */
		ret = _regulator_is_enabled(rdev);
		if (ret > 0)
			rdev->use_count = 1;
		else
			rdev->use_count = 0;
	}

out:
	mutex_unlock(&regulator_list_mutex);

	return regulator;
}

/**
 * regulator_get - lookup and obtain a reference to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
 */
struct regulator *regulator_get(struct device *dev, const char *id)
{
	return _regulator_get(dev, id, false, true);
}
EXPORT_SYMBOL_GPL(regulator_get);

/**
 * regulator_get_exclusive - obtain exclusive access to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.
Other consumers will be
 * unable to obtain this regulator while this reference is held and the
 * use count for the regulator will be initialised to reflect the current
 * state of the regulator.
 *
 * This is intended for use by consumers which cannot tolerate shared
 * use of the regulator such as those which need to force the
 * regulator off for correct operation of the hardware they are
 * controlling.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
 */
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
	/* exclusive=true: fail -EBUSY/-EPERM if anyone else holds it */
	return _regulator_get(dev, id, true, false);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);

/**
 * regulator_get_optional - obtain optional access to a regulator.
 * @dev: device for regulator "consumer"
 * @id: Supply name or regulator ID.
 *
 * Returns a struct regulator corresponding to the regulator producer,
 * or IS_ERR() condition containing errno.
 *
 * This is intended for use by consumers for devices which can have
 * some supplies unconnected in normal use, such as some MMC devices.
 * It can allow the regulator core to provide stub supplies for other
 * supplies requested using normal regulator_get() calls without
 * disrupting the operation of drivers that can handle absent
 * supplies.
 *
 * Use of supply names configured via regulator_set_device_supply() is
 * strongly encouraged.  It is recommended that the supply name used
 * should match the name used for the supply and/or the relevant
 * device pins in the datasheet.
*/ struct regulator *regulator_get_optional(struct device *dev, const char *id) { return _regulator_get(dev, id, false, false); } EXPORT_SYMBOL_GPL(regulator_get_optional); /* Locks held by regulator_put() */ static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; if (regulator == NULL || IS_ERR(regulator)) return; rdev = regulator->rdev; debugfs_remove_recursive(regulator->debugfs); /* remove any sysfs entries */ if (regulator->dev) sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); kfree(regulator->supply_name); list_del(&regulator->list); kfree(regulator); rdev->open_count--; rdev->exclusive = 0; module_put(rdev->owner); } /** * regulator_put - "free" the regulator source * @regulator: regulator source * * Note: drivers must ensure that all regulator_enable calls made on this * regulator source are balanced by regulator_disable calls prior to calling * this function. */ void regulator_put(struct regulator *regulator) { mutex_lock(&regulator_list_mutex); _regulator_put(regulator); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_put); /** * regulator_register_supply_alias - Provide device alias for supply lookup * * @dev: device that will be given as the regulator "consumer" * @id: Supply name or regulator ID * @alias_dev: device that should be used to lookup the supply * @alias_id: Supply name or regulator ID that should be used to lookup the * supply * * All lookups for id on dev will instead be conducted for alias_id on * alias_dev. 
*/ int regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id) { struct regulator_supply_alias *map; map = regulator_find_supply_alias(dev, id); if (map) return -EEXIST; map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL); if (!map) return -ENOMEM; map->src_dev = dev; map->src_supply = id; map->alias_dev = alias_dev; map->alias_supply = alias_id; list_add(&map->list, &regulator_supply_alias_list); pr_info("Adding alias for supply %s,%s -> %s,%s\n", id, dev_name(dev), alias_id, dev_name(alias_dev)); return 0; } EXPORT_SYMBOL_GPL(regulator_register_supply_alias); /** * regulator_unregister_supply_alias - Remove device alias * * @dev: device that will be given as the regulator "consumer" * @id: Supply name or regulator ID * * Remove a lookup alias if one exists for id on dev. */ void regulator_unregister_supply_alias(struct device *dev, const char *id) { struct regulator_supply_alias *map; map = regulator_find_supply_alias(dev, id); if (map) { list_del(&map->list); kfree(map); } } EXPORT_SYMBOL_GPL(regulator_unregister_supply_alias); /** * regulator_bulk_register_supply_alias - register multiple aliases * * @dev: device that will be given as the regulator "consumer" * @id: List of supply names or regulator IDs * @alias_dev: device that should be used to lookup the supply * @alias_id: List of supply names or regulator IDs that should be used to * lookup the supply * @num_id: Number of aliases to register * * @return 0 on success, an errno on failure. * * This helper function allows drivers to register several supply * aliases in one operation. If any of the aliases cannot be * registered any aliases that were registered will be removed * before returning to the caller. 
*/ int regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id) { int i; int ret; for (i = 0; i < num_id; ++i) { ret = regulator_register_supply_alias(dev, id[i], alias_dev, alias_id[i]); if (ret < 0) goto err; } return 0; err: dev_err(dev, "Failed to create supply alias %s,%s -> %s,%s\n", id[i], dev_name(dev), alias_id[i], dev_name(alias_dev)); while (--i >= 0) regulator_unregister_supply_alias(dev, id[i]); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_register_supply_alias); /** * regulator_bulk_unregister_supply_alias - unregister multiple aliases * * @dev: device that will be given as the regulator "consumer" * @id: List of supply names or regulator IDs * @num_id: Number of aliases to unregister * * This helper function allows drivers to unregister several supply * aliases in one operation. */ void regulator_bulk_unregister_supply_alias(struct device *dev, const char *const *id, int num_id) { int i; for (i = 0; i < num_id; ++i) regulator_unregister_supply_alias(dev, id[i]); } EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); /* Manage enable GPIO list. 
Same GPIO pin can be shared among regulators */ static int regulator_ena_gpio_request(struct regulator_dev *rdev, const struct regulator_config *config) { struct regulator_enable_gpio *pin; struct gpio_desc *gpiod; int ret; gpiod = gpio_to_desc(config->ena_gpio); list_for_each_entry(pin, &regulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { rdev_dbg(rdev, "GPIO %d is already used\n", config->ena_gpio); goto update_ena_gpio_to_rdev; } } ret = gpio_request_one(config->ena_gpio, GPIOF_DIR_OUT | config->ena_gpio_flags, rdev_get_name(rdev)); if (ret) return ret; pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); if (pin == NULL) { gpio_free(config->ena_gpio); return -ENOMEM; } pin->gpiod = gpiod; pin->ena_gpio_invert = config->ena_gpio_invert; list_add(&pin->list, &regulator_ena_gpio_list); update_ena_gpio_to_rdev: pin->request_count++; rdev->ena_pin = pin; return 0; } static void regulator_ena_gpio_free(struct regulator_dev *rdev) { struct regulator_enable_gpio *pin, *n; if (!rdev->ena_pin) return; /* Free the GPIO only in case of no use */ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) { if (pin->gpiod == rdev->ena_pin->gpiod) { if (pin->request_count <= 1) { pin->request_count = 0; gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); } else { pin->request_count--; } } } } /** * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control * @rdev: regulator_dev structure * @enable: enable GPIO at initial use? * * GPIO is enabled in case of initial use. (enable_count is 0) * GPIO is disabled when it is not shared any more. 
(enable_count <= 1) */
static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
{
	struct regulator_enable_gpio *pin = rdev->ena_pin;

	if (!pin)
		return -EINVAL;

	if (enable) {
		/* Enable GPIO at initial use */
		if (pin->enable_count == 0)
			gpiod_set_value_cansleep(pin->gpiod,
						 !pin->ena_gpio_invert);
		pin->enable_count++;
	} else {
		/* Still shared by another enabled user: just drop the count */
		if (pin->enable_count > 1) {
			pin->enable_count--;
			return 0;
		}

		/* Disable GPIO if not used */
		if (pin->enable_count <= 1) {
			gpiod_set_value_cansleep(pin->gpiod,
						 pin->ena_gpio_invert);
			pin->enable_count = 0;
		}
	}

	return 0;
}

/**
 * _regulator_enable_delay - a delay helper function
 * @delay: time to delay in microseconds
 *
 * Delay for the requested amount of time as per the guidelines in:
 *
 *     Documentation/timers/timers-howto.txt
 *
 * The assumption here is that regulators will never be enabled in
 * atomic context and therefore sleeping functions can be used.
 */
static void _regulator_enable_delay(unsigned int delay)
{
	unsigned int ms = delay / 1000;
	unsigned int us = delay % 1000;

	if (ms > 0) {
		/*
		 * For small enough values, handle super-millisecond
		 * delays in the usleep_range() call below.
		 */
		if (ms < 20)
			us += ms * 1000;
		else
			msleep(ms);
	}

	/*
	 * Give the scheduler some room to coalesce with any other
	 * wakeup sources. For delays shorter than 10 us, don't even
	 * bother setting up high-resolution timers and just busy-
	 * loop.
	 */
	if (us >= 10)
		usleep_range(us, us + 100);
	else
		udelay(us);
}

static int _regulator_do_enable(struct regulator_dev *rdev)
{
	int ret, delay;

	/* Query before enabling in case configuration dependent.  */
	ret = _regulator_get_enable_time(rdev);
	if (ret >= 0) {
		delay = ret;
	} else {
		rdev_warn(rdev, "enable_time() failed: %d\n", ret);
		delay = 0;
	}

	trace_regulator_enable(rdev_get_name(rdev));

	if (rdev->desc->off_on_delay) {
		/* if needed, keep a distance of off_on_delay from last time
		 * this regulator was disabled.
		 */
		unsigned long start_jiffy = jiffies;
		unsigned long intended, max_delay, remaining;

		max_delay = usecs_to_jiffies(rdev->desc->off_on_delay);
		intended = rdev->last_off_jiffy + max_delay;

		if (time_before(start_jiffy, intended)) {
			/* calc remaining jiffies to deal with one-time
			 * timer wrapping.
			 * in case of multiple timer wrapping, either it can be
			 * detected by out-of-range remaining, or it cannot be
			 * detected and we get a penalty of
			 * _regulator_enable_delay().
			 */
			remaining = intended - start_jiffy;
			if (remaining <= max_delay)
				_regulator_enable_delay(
						jiffies_to_usecs(remaining));
		}
	}

	if (rdev->ena_pin) {
		ret = regulator_ena_gpio_ctrl(rdev, true);
		if (ret < 0)
			return ret;
		rdev->ena_gpio_state = 1;
	} else if (rdev->desc->ops->enable) {
		ret = rdev->desc->ops->enable(rdev);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	/* Allow the regulator to ramp; it would be useful to extend
	 * this for bulk operations so that the regulators can ramp
	 * together.  */
	trace_regulator_enable_delay(rdev_get_name(rdev));

	_regulator_enable_delay(delay);

	trace_regulator_enable_complete(rdev_get_name(rdev));

	return 0;
}

/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
	int ret;

	/* check voltage and requested load before enabling */
	if (rdev->constraints &&
	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
		drms_uA_update(rdev);

	if (rdev->use_count == 0) {
		/* The regulator may be on if it's not switchable or left on */
		ret = _regulator_is_enabled(rdev);
		if (ret == -EINVAL || ret == 0) {
			if (!_regulator_can_change_status(rdev))
				return -EPERM;

			ret = _regulator_do_enable(rdev);
			if (ret < 0)
				return ret;

		} else if (ret < 0) {
			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
			return ret;
		}
		/* Fallthrough on positive return values - already enabled */
	}

	rdev->use_count++;

	return 0;
}

/**
 * regulator_enable - enable regulator output
 * @regulator: regulator source
 *
 * Request that the regulator be enabled with the regulator output at
 * the predefined voltage or current value.  Calls to regulator_enable()
 * must be balanced with calls to regulator_disable().
 *
 * NOTE: the output value can be set by other drivers, boot loader or may be
 * hardwired in the regulator.
 */
int regulator_enable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;

	if (regulator->always_on)
		return 0;

	/* Enable the parent supply first so power is available to ramp */
	if (rdev->supply) {
		ret = regulator_enable(rdev->supply);
		if (ret != 0)
			return ret;
	}

	mutex_lock(&rdev->mutex);
	ret = _regulator_enable(rdev);
	mutex_unlock(&rdev->mutex);

	/* Balance the supply enable taken above on failure */
	if (ret != 0 && rdev->supply)
		regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_enable);

static int _regulator_do_disable(struct regulator_dev *rdev)
{
	int ret;

	trace_regulator_disable(rdev_get_name(rdev));

	if (rdev->ena_pin) {
		ret = regulator_ena_gpio_ctrl(rdev, false);
		if (ret < 0)
			return ret;
		rdev->ena_gpio_state = 0;

	} else if (rdev->desc->ops->disable) {
		ret = rdev->desc->ops->disable(rdev);
		if (ret != 0)
			return ret;
	}

	/* cares about last_off_jiffy only if off_on_delay is required by
	 * device.
	 */
	if (rdev->desc->off_on_delay)
		rdev->last_off_jiffy = jiffies;

	trace_regulator_disable_complete(rdev_get_name(rdev));

	return 0;
}

/* locks held by regulator_disable() */
static int _regulator_disable(struct regulator_dev *rdev)
{
	int ret = 0;

	if (WARN(rdev->use_count <= 0,
		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
		return -EIO;

	/* are we the last user and permitted to disable ? */
	if (rdev->use_count == 1 &&
	    (rdev->constraints && !rdev->constraints->always_on)) {

		/* we are last user */
		if (_regulator_can_change_status(rdev)) {
			ret = _regulator_do_disable(rdev);
			if (ret < 0) {
				rdev_err(rdev, "failed to disable\n");
				return ret;
			}
			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
					NULL);
		}

		rdev->use_count = 0;
	} else if (rdev->use_count > 1) {

		if (rdev->constraints &&
			(rdev->constraints->valid_ops_mask &
			REGULATOR_CHANGE_DRMS))
			drms_uA_update(rdev);

		rdev->use_count--;
	}

	return ret;
}

/**
 * regulator_disable - disable regulator output
 * @regulator: regulator source
 *
 * Disable the regulator output voltage or current.  Calls to
 * regulator_enable() must be balanced with calls to
 * regulator_disable().
 *
 * NOTE: this will only disable the regulator output if no other consumer
 * devices have it enabled, the regulator device supports disabling and
 * machine constraints permit this operation.
 */
int regulator_disable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;

	if (regulator->always_on)
		return 0;

	mutex_lock(&rdev->mutex);
	ret = _regulator_disable(rdev);
	mutex_unlock(&rdev->mutex);

	/* Release our reference on the parent supply as well */
	if (ret == 0 && rdev->supply)
		regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);

/* locks held by regulator_force_disable() */
static int _regulator_force_disable(struct regulator_dev *rdev)
{
	int ret = 0;

	ret = _regulator_do_disable(rdev);
	if (ret < 0) {
		rdev_err(rdev, "failed to force disable\n");
		return ret;
	}

	_notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
			REGULATOR_EVENT_DISABLE, NULL);

	return 0;
}

/**
 * regulator_force_disable - force disable regulator output
 * @regulator: regulator source
 *
 * Forcibly disable the regulator output voltage or current.
 * NOTE: this *will* disable the regulator output even if other consumer
 * devices have it enabled. This should be used for situations when device
 * damage will likely occur if the regulator is not disabled (e.g. over temp).
 */
int regulator_force_disable(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	mutex_lock(&rdev->mutex);
	regulator->uA_load = 0;
	ret = _regulator_force_disable(regulator->rdev);
	mutex_unlock(&rdev->mutex);

	/* Drop every outstanding reference on the parent supply */
	if (rdev->supply)
		while (rdev->open_count--)
			regulator_disable(rdev->supply);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_force_disable);

static void regulator_disable_work(struct work_struct *work)
{
	struct regulator_dev *rdev = container_of(work, struct regulator_dev,
						  disable_work.work);
	int count, i, ret;

	mutex_lock(&rdev->mutex);

	BUG_ON(!rdev->deferred_disables);

	count = rdev->deferred_disables;
	rdev->deferred_disables = 0;

	for (i = 0; i < count; i++) {
		ret = _regulator_disable(rdev);
		if (ret != 0)
			rdev_err(rdev, "Deferred disable failed: %d\n", ret);
	}

	mutex_unlock(&rdev->mutex);

	if (rdev->supply) {
		for (i = 0; i < count; i++) {
			ret = regulator_disable(rdev->supply);
			if (ret != 0) {
				rdev_err(rdev,
					 "Supply disable failed: %d\n", ret);
			}
		}
	}
}

/**
 * regulator_disable_deferred - disable regulator output with delay
 * @regulator: regulator source
 * @ms: milliseconds until the regulator is disabled
 *
 * Execute regulator_disable() on the regulator after a delay.  This
 * is intended for use with devices that require some time to quiesce.
 *
 * NOTE: this will only disable the regulator output if no other consumer
 * devices have it enabled, the regulator device supports disabling and
 * machine constraints permit this operation.
 */
int regulator_disable_deferred(struct regulator *regulator, int ms)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	if (regulator->always_on)
		return 0;

	if (!ms)
		return regulator_disable(regulator);

	mutex_lock(&rdev->mutex);
	rdev->deferred_disables++;
	mutex_unlock(&rdev->mutex);

	ret = queue_delayed_work(system_power_efficient_wq,
				 &rdev->disable_work,
				 msecs_to_jiffies(ms));
	if (ret < 0)
		return ret;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(regulator_disable_deferred);

static int _regulator_is_enabled(struct regulator_dev *rdev)
{
	/* A GPIO control always takes precedence */
	if (rdev->ena_pin)
		return rdev->ena_gpio_state;

	/* If we don't know then assume that the regulator is always on */
	if (!rdev->desc->ops->is_enabled)
		return 1;

	return rdev->desc->ops->is_enabled(rdev);
}

/**
 * regulator_is_enabled - is the regulator output enabled
 * @regulator: regulator source
 *
 * Returns positive if the regulator driver backing the source/client
 * has requested that the device be enabled, zero if it hasn't, else a
 * negative errno code.
 *
 * Note that the device backing this regulator handle can have multiple
 * users, so it might be enabled even if regulator_enable() was never
 * called for this particular source.
 */
int regulator_is_enabled(struct regulator *regulator)
{
	int ret;

	if (regulator->always_on)
		return 1;

	mutex_lock(&regulator->rdev->mutex);
	ret = _regulator_is_enabled(regulator->rdev);
	mutex_unlock(&regulator->rdev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_is_enabled);

/**
 * regulator_can_change_voltage - check if regulator can change voltage
 * @regulator: regulator source
 *
 * Returns positive if the regulator driver backing the source/client
 * can change its voltage, false otherwise. Useful for detecting fixed
 * or dummy regulators and disabling voltage change logic in the client
 * driver.
 */
int regulator_can_change_voltage(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	if (rdev->constraints &&
	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
			return 1;

		if (rdev->desc->continuous_voltage_range &&
		    rdev->constraints->min_uV && rdev->constraints->max_uV &&
		    rdev->constraints->min_uV != rdev->constraints->max_uV)
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_can_change_voltage);

/**
 * regulator_count_voltages - count regulator_list_voltage() selectors
 * @regulator: regulator source
 *
 * Returns number of selectors, or negative errno.  Selectors are
 * numbered starting at zero, and typically correspond to bitfields
 * in hardware registers.
 */
int regulator_count_voltages(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	if (rdev->desc->n_voltages)
		return rdev->desc->n_voltages;

	if (!rdev->supply)
		return -EINVAL;

	/* Fall back to the parent supply's selector count */
	return regulator_count_voltages(rdev->supply);
}
EXPORT_SYMBOL_GPL(regulator_count_voltages);

/**
 * regulator_list_voltage - enumerate supported voltages
 * @regulator: regulator source
 * @selector: identify voltage to list
 * Context: can sleep
 *
 * Returns a voltage that can be passed to @regulator_set_voltage(),
 * zero if this selector code can't be used on this system, or a
 * negative errno.
 */
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;
	int ret;

	if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector)
		return rdev->desc->fixed_uV;

	if (ops->list_voltage) {
		if (selector >= rdev->desc->n_voltages)
			return -EINVAL;
		mutex_lock(&rdev->mutex);
		ret = ops->list_voltage(rdev, selector);
		mutex_unlock(&rdev->mutex);
	} else if (rdev->supply) {
		ret = regulator_list_voltage(rdev->supply, selector);
	} else {
		return -EINVAL;
	}

	/* Report 0 for selectors outside the machine constraints */
	if (ret > 0) {
		if (ret < rdev->constraints->min_uV)
			ret = 0;
		else if (ret > rdev->constraints->max_uV)
			ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);

/**
 * regulator_get_regmap - get the regulator's register map
 * @regulator: regulator source
 *
 * Returns the register map for the given regulator, or an ERR_PTR value
 * if the regulator doesn't use regmap.
 */
struct regmap *regulator_get_regmap(struct regulator *regulator)
{
	struct regmap *map = regulator->rdev->regmap;

	return map ? map : ERR_PTR(-EOPNOTSUPP);
}

/**
 * regulator_get_hardware_vsel_register - get the HW voltage selector register
 * @regulator: regulator source
 * @vsel_reg: voltage selector register, output parameter
 * @vsel_mask: mask for voltage selector bitfield, output parameter
 *
 * Returns the hardware register offset and bitmask used for setting the
 * regulator voltage. This might be useful when configuring voltage-scaling
 * hardware or firmware that can make I2C requests behind the kernel's back,
 * for example.
 *
 * On success, the output parameters @vsel_reg and @vsel_mask are filled in
 * and 0 is returned, otherwise a negative errno is returned.
 */
int regulator_get_hardware_vsel_register(struct regulator *regulator,
					 unsigned *vsel_reg,
					 unsigned *vsel_mask)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;

	/* Only meaningful for regmap-backed voltage selection */
	if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
		return -EOPNOTSUPP;

	*vsel_reg = rdev->desc->vsel_reg;
	*vsel_mask = rdev->desc->vsel_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register);

/**
 * regulator_list_hardware_vsel - get the HW-specific register value for a selector
 * @regulator: regulator source
 * @selector: identify voltage to list
 *
 * Converts the selector to a hardware-specific voltage selector that can be
 * directly written to the regulator registers. The address of the voltage
 * register can be determined by calling @regulator_get_hardware_vsel_register.
 *
 * On error a negative errno is returned.
 */
int regulator_list_hardware_vsel(struct regulator *regulator,
				 unsigned selector)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;

	if (selector >= rdev->desc->n_voltages)
		return -EINVAL;
	if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
		return -EOPNOTSUPP;

	return selector;
}
EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);

/**
 * regulator_get_linear_step - return the voltage step size between VSEL values
 * @regulator: regulator source
 *
 * Returns the voltage step size between VSEL values for linear
 * regulators, or return 0 if the regulator isn't a linear regulator.
 */
unsigned int regulator_get_linear_step(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;

	return rdev->desc->uV_step;
}
EXPORT_SYMBOL_GPL(regulator_get_linear_step);

/**
 * regulator_is_supported_voltage - check if a voltage range can be supported
 *
 * @regulator: Regulator to check.
 * @min_uV: Minimum required voltage in uV.
 * @max_uV: Maximum required voltage in uV.
 *
 * Returns a boolean or a negative error code.
 */
 */
int regulator_is_supported_voltage(struct regulator *regulator,
				   int min_uV, int max_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	int i, voltages, ret;

	/* If we can't change voltage check the current voltage */
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		ret = regulator_get_voltage(regulator);
		if (ret >= 0)
			return min_uV <= ret && ret <= max_uV;
		else
			return ret;
	}

	/* Any voltage within constraints range is fine? */
	if (rdev->desc->continuous_voltage_range)
		return min_uV >= rdev->constraints->min_uV &&
				max_uV <= rdev->constraints->max_uV;

	ret = regulator_count_voltages(regulator);
	if (ret < 0)
		return ret;
	voltages = ret;

	for (i = 0; i < voltages; i++) {
		ret = regulator_list_voltage(regulator, i);

		if (ret >= min_uV && ret <= max_uV)
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);

/* Run the PRE/ABORT voltage-change notifier chain around set_voltage() */
static int _regulator_call_set_voltage(struct regulator_dev *rdev,
				       int min_uV, int max_uV,
				       unsigned *selector)
{
	struct pre_voltage_change_data data;
	int ret;

	data.old_uV = _regulator_get_voltage(rdev);
	data.min_uV = min_uV;
	data.max_uV = max_uV;
	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
				   &data);
	if (ret & NOTIFY_STOP_MASK)
		return -EINVAL;

	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector);
	if (ret >= 0)
		return ret;

	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
			     (void *)data.old_uV);

	return ret;
}

/* As above, but for selector-based set_voltage_sel() */
static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev,
					   int uV, unsigned selector)
{
	struct pre_voltage_change_data data;
	int ret;

	data.old_uV = _regulator_get_voltage(rdev);
	data.min_uV = uV;
	data.max_uV = uV;
	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE,
				   &data);
	if (ret & NOTIFY_STOP_MASK)
		return -EINVAL;

	ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
	if (ret >= 0)
		return ret;

	_notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE,
			     (void *)data.old_uV);

	return ret;
}

static int _regulator_do_set_voltage(struct regulator_dev *rdev,
				     int min_uV, int max_uV)
{
	int ret;
	int delay = 0;
	int best_val = 0;
	unsigned int selector;
	int old_selector = -1;

	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);

	min_uV += rdev->constraints->uV_offset;
	max_uV += rdev->constraints->uV_offset;

	/*
	 * If we can't obtain the old selector there is not enough
	 * info to call set_voltage_time_sel().
	 */
	if (_regulator_is_enabled(rdev) &&
	    rdev->desc->ops->set_voltage_time_sel &&
	    rdev->desc->ops->get_voltage_sel) {
		old_selector = rdev->desc->ops->get_voltage_sel(rdev);
		if (old_selector < 0)
			return old_selector;
	}

	if (rdev->desc->ops->set_voltage) {
		ret = _regulator_call_set_voltage(rdev, min_uV, max_uV,
						  &selector);

		if (ret >= 0) {
			if (rdev->desc->ops->list_voltage)
				best_val = rdev->desc->ops->list_voltage(rdev,
									 selector);
			else
				best_val = _regulator_get_voltage(rdev);
		}

	} else if (rdev->desc->ops->set_voltage_sel) {
		/* Map the requested range to a selector, preferring the
		 * driver's own map_voltage() over the generic helpers. */
		if (rdev->desc->ops->map_voltage) {
			ret = rdev->desc->ops->map_voltage(rdev, min_uV,
							   max_uV);
		} else {
			if (rdev->desc->ops->list_voltage ==
			    regulator_list_voltage_linear)
				ret = regulator_map_voltage_linear(rdev,
								min_uV, max_uV);
			else if (rdev->desc->ops->list_voltage ==
				 regulator_list_voltage_linear_range)
				ret = regulator_map_voltage_linear_range(rdev,
								min_uV, max_uV);
			else
				ret = regulator_map_voltage_iterate(rdev,
								min_uV, max_uV);
		}

		if (ret >= 0) {
			best_val = rdev->desc->ops->list_voltage(rdev, ret);
			if (min_uV <= best_val && max_uV >= best_val) {
				selector = ret;
				/* Skip the hardware write if already there */
				if (old_selector == selector)
					ret = 0;
				else
					ret = _regulator_call_set_voltage_sel(
						rdev, best_val, selector);
			} else {
				ret = -EINVAL;
			}
		}
	} else {
		ret = -EINVAL;
	}

	/* Call set_voltage_time_sel if successfully obtained old_selector */
	if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
		&& old_selector != selector) {

		delay = rdev->desc->ops->set_voltage_time_sel(rdev,
						old_selector, selector);
		if (delay < 0) {
			rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n",
				  delay);
			delay = 0;
		}

		/* Insert any necessary delays */
		if (delay >= 1000) {
			mdelay(delay / 1000);
			udelay(delay % 1000);
		} else if (delay) {
			udelay(delay);
		}
	}

	if (ret == 0 && best_val >= 0) {
		unsigned long data = best_val;

		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
				     (void *)data);
	}

	trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);

	return ret;
}

/**
 * regulator_set_voltage - set regulator output voltage
 * @regulator: regulator source
 * @min_uV: Minimum required voltage in uV
 * @max_uV: Maximum acceptable voltage in uV
 *
 * Sets a voltage regulator to the desired output voltage. This can be set
 * during any regulator state. IOW, regulator can be disabled or enabled.
 *
 * If the regulator is enabled then the voltage will change to the new value
 * immediately otherwise if the regulator is disabled the regulator will
 * output at the new voltage when enabled.
 *
 * NOTE: If the regulator is shared between several devices then the lowest
 * request voltage that meets the system constraints will be used.
 * Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret = 0;
	int old_min_uV, old_max_uV;
	int current_uV;

	mutex_lock(&rdev->mutex);

	/* If we're setting the same range as last time the change
	 * should be a noop (some cpufreq implementations use the same
	 * voltage for multiple frequencies, for example).
	 */
	if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
		goto out;

	/* If we're trying to set a range that overlaps the current voltage,
	 * return successfully even though the regulator does not support
	 * changing the voltage.
	 */
	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
		current_uV = _regulator_get_voltage(rdev);
		if (min_uV <= current_uV && current_uV <= max_uV) {
			regulator->min_uV = min_uV;
			regulator->max_uV = max_uV;
			goto out;
		}
	}

	/* sanity check */
	if (!rdev->desc->ops->set_voltage &&
	    !rdev->desc->ops->set_voltage_sel) {
		ret = -EINVAL;
		goto out;
	}

	/* constraints check */
	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	/* restore original values in case of error */
	old_min_uV = regulator->min_uV;
	old_max_uV = regulator->max_uV;
	regulator->min_uV = min_uV;
	regulator->max_uV = max_uV;

	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out2;

	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
	if (ret < 0)
		goto out2;

out:
	mutex_unlock(&rdev->mutex);
	return ret;
out2:
	/* roll back the consumer's recorded range on failure */
	regulator->min_uV = old_min_uV;
	regulator->max_uV = old_max_uV;
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);

/**
 * regulator_set_voltage_time - get raise/fall time
 * @regulator: regulator source
 * @old_uV: starting voltage in microvolts
 * @new_uV: target voltage in microvolts
 *
 * Provided with the starting and ending voltage, this function attempts to
 * calculate the time in microseconds required to rise or fall to this new
 * voltage.
 */
int regulator_set_voltage_time(struct regulator *regulator,
			       int old_uV, int new_uV)
{
	struct regulator_dev *rdev = regulator->rdev;
	const struct regulator_ops *ops = rdev->desc->ops;
	int old_sel = -1;
	int new_sel = -1;
	int voltage;
	int i;

	/* Currently requires operations to do this */
	if (!ops->list_voltage || !ops->set_voltage_time_sel
	    || !rdev->desc->n_voltages)
		return -EINVAL;

	for (i = 0; i < rdev->desc->n_voltages; i++) {
		/* We only look for exact voltage matches here */
		voltage = regulator_list_voltage(regulator, i);
		if (voltage < 0)
			return -EINVAL;
		if (voltage == 0)
			continue;
		if (voltage == old_uV)
			old_sel = i;
		if (voltage == new_uV)
			new_sel = i;
	}

	if (old_sel < 0 || new_sel < 0)
		return -EINVAL;

	return ops->set_voltage_time_sel(rdev, old_sel, new_sel);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);

/**
 * regulator_set_voltage_time_sel - get raise/fall time
 * @rdev: regulator source device
 * @old_selector: selector for starting voltage
 * @new_selector: selector for target voltage
 *
 * Provided with the starting and target voltage selectors, this function
 * returns time in microseconds required to rise or fall to this new voltage
 *
 * Drivers providing ramp_delay in regulation_constraints can use this as their
 * set_voltage_time_sel() operation.
 */
int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
				   unsigned int old_selector,
				   unsigned int new_selector)
{
	unsigned int ramp_delay = 0;
	int old_volt, new_volt;

	/* Machine constraints override the driver-provided ramp rate */
	if (rdev->constraints->ramp_delay)
		ramp_delay = rdev->constraints->ramp_delay;
	else if (rdev->desc->ramp_delay)
		ramp_delay = rdev->desc->ramp_delay;

	if (ramp_delay == 0) {
		rdev_warn(rdev, "ramp_delay not set\n");
		return 0;
	}

	/* sanity check */
	if (!rdev->desc->ops->list_voltage)
		return -EINVAL;

	old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
	new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);

	return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
}
EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);

/**
 * regulator_sync_voltage - re-apply last regulator output voltage
 * @regulator: regulator source
 *
 * Re-apply the last configured voltage.  This is intended to be used
 * where some external control source the consumer is cooperating with
 * has caused the configured voltage to change.
 */
int regulator_sync_voltage(struct regulator *regulator)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret, min_uV, max_uV;

	mutex_lock(&rdev->mutex);

	if (!rdev->desc->ops->set_voltage &&
	    !rdev->desc->ops->set_voltage_sel) {
		ret = -EINVAL;
		goto out;
	}

	/* This is only going to work if we've had a voltage configured. */
	if (!regulator->min_uV && !regulator->max_uV) {
		ret = -EINVAL;
		goto out;
	}

	min_uV = regulator->min_uV;
	max_uV = regulator->max_uV;

	/* This should be a paranoia check... */
	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
	if (ret < 0)
		goto out;

	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);

out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_sync_voltage);

static int _regulator_get_voltage(struct regulator_dev *rdev)
{
	int sel, ret;

	if (rdev->desc->ops->get_voltage_sel) {
		sel = rdev->desc->ops->get_voltage_sel(rdev);
		if (sel < 0)
			return sel;
		ret = rdev->desc->ops->list_voltage(rdev, sel);
	} else if (rdev->desc->ops->get_voltage) {
		ret = rdev->desc->ops->get_voltage(rdev);
	} else if (rdev->desc->ops->list_voltage) {
		ret = rdev->desc->ops->list_voltage(rdev, 0);
	} else if (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1)) {
		ret = rdev->desc->fixed_uV;
	} else if (rdev->supply) {
		ret = regulator_get_voltage(rdev->supply);
	} else {
		return -EINVAL;
	}

	if (ret < 0)
		return ret;
	/* Report the consumer-visible voltage, without the machine offset */
	return ret - rdev->constraints->uV_offset;
}

/**
 * regulator_get_voltage - get regulator output voltage
 * @regulator: regulator source
 *
 * This returns the current regulator voltage in uV.
 *
 * NOTE: If the regulator is disabled it will return the voltage value. This
 * function should not be used to determine regulator state.
 */
int regulator_get_voltage(struct regulator *regulator)
{
	int ret;

	mutex_lock(&regulator->rdev->mutex);

	ret = _regulator_get_voltage(regulator->rdev);

	mutex_unlock(&regulator->rdev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(regulator_get_voltage);

/**
 * regulator_set_current_limit - set regulator output current limit
 * @regulator: regulator source
 * @min_uA: Minimum supported current in uA
 * @max_uA: Maximum supported current in uA
 *
 * Sets current sink to the desired output current. This can be set during
 * any regulator state. IOW, regulator can be disabled or enabled.
 *
 * If the regulator is enabled then the current will change to the new value
 * immediately otherwise if the regulator is disabled the regulator will
 * output at the new current when enabled.
 *
 * NOTE: Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_current_limit(struct regulator *regulator,
			       int min_uA, int max_uA)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->set_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	/* constraints check */
	ret = regulator_check_current_limit(rdev, &min_uA, &max_uA);
	if (ret < 0)
		goto out;

	ret = rdev->desc->ops->set_current_limit(rdev, min_uA, max_uA);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_current_limit);

static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->get_current_limit) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdev->desc->ops->get_current_limit(rdev);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}

/**
 * regulator_get_current_limit - get regulator output current
 * @regulator: regulator source
 *
 * This returns the current supplied by the specified current sink in uA.
 *
 * NOTE: If the regulator is disabled it will return the current value. This
 * function should not be used to determine regulator state.
 */
int regulator_get_current_limit(struct regulator *regulator)
{
	return _regulator_get_current_limit(regulator->rdev);
}
EXPORT_SYMBOL_GPL(regulator_get_current_limit);

/**
 * regulator_set_mode - set regulator operating mode
 * @regulator: regulator source
 * @mode: operating mode - one of the REGULATOR_MODE constants
 *
 * Set regulator operating mode to increase regulator efficiency or improve
 * regulation performance.
 *
 * NOTE: Regulator system constraints must be set for this regulator before
 * calling this function otherwise this call will fail.
 */
int regulator_set_mode(struct regulator *regulator, unsigned int mode)
{
	struct regulator_dev *rdev = regulator->rdev;
	int ret;
	int regulator_curr_mode;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->set_mode) {
		ret = -EINVAL;
		goto out;
	}

	/* return if the same mode is requested */
	if (rdev->desc->ops->get_mode) {
		regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
		if (regulator_curr_mode == mode) {
			ret = 0;
			goto out;
		}
	}

	/* constraints check */
	ret = regulator_mode_constrain(rdev, &mode);
	if (ret < 0)
		goto out;

	ret = rdev->desc->ops->set_mode(rdev, mode);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(regulator_set_mode);

static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
{
	int ret;

	mutex_lock(&rdev->mutex);

	/* sanity check */
	if (!rdev->desc->ops->get_mode) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdev->desc->ops->get_mode(rdev);
out:
	mutex_unlock(&rdev->mutex);
	return ret;
}

/**
 * regulator_get_mode - get regulator operating mode
 * @regulator: regulator source
 *
 * Get the current regulator operating mode.
 */
unsigned int regulator_get_mode(struct regulator *regulator)
{
	return _regulator_get_mode(regulator->rdev);
}
EXPORT_SYMBOL_GPL(regulator_get_mode);

/**
 * regulator_set_optimum_mode - set regulator optimum operating mode
 * @regulator: regulator source
 * @uA_load: load current
 *
 * Notifies the regulator core of a new device load. This is then used by
 * DRMS (if enabled by constraints) to set the most efficient regulator
 * operating mode for the new regulator loading.
 *
 * Consumer devices notify their supply regulator of the maximum power
 * they will require (can be taken from device datasheet in the power
 * consumption tables) when they change operational status and hence power
 * state.
Examples of operational state changes that can affect power * consumption are :- * * o Device is opened / closed. * o Device I/O is about to begin or has just finished. * o Device is idling in between work. * * This information is also exported via sysfs to userspace. * * DRMS will sum the total requested load on the regulator and change * to the most efficient operating mode if platform constraints allow. * * Returns the new regulator mode or error. */ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) { struct regulator_dev *rdev = regulator->rdev; struct regulator *consumer; int ret, output_uV, input_uV = 0, total_uA_load = 0; unsigned int mode; if (rdev->supply) input_uV = regulator_get_voltage(rdev->supply); mutex_lock(&rdev->mutex); /* * first check to see if we can set modes at all, otherwise just * tell the consumer everything is OK. */ regulator->uA_load = uA_load; ret = regulator_check_drms(rdev); if (ret < 0) { ret = 0; goto out; } if (!rdev->desc->ops->get_optimum_mode) goto out; /* * we can actually do this so any errors are indicators of * potential real failure. */ ret = -EINVAL; if (!rdev->desc->ops->set_mode) goto out; /* get output voltage */ output_uV = _regulator_get_voltage(rdev); if (output_uV <= 0) { rdev_err(rdev, "invalid output voltage found\n"); goto out; } /* No supply? 
Use constraint voltage */ if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { rdev_err(rdev, "invalid input voltage found\n"); goto out; } /* calc total requested load for this regulator */ list_for_each_entry(consumer, &rdev->consumer_list, list) total_uA_load += consumer->uA_load; mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, output_uV, total_uA_load); ret = regulator_mode_constrain(rdev, &mode); if (ret < 0) { rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", total_uA_load, input_uV, output_uV); goto out; } ret = rdev->desc->ops->set_mode(rdev, mode); if (ret < 0) { rdev_err(rdev, "failed to set optimum mode %x\n", mode); goto out; } ret = mode; out: mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); /** * regulator_allow_bypass - allow the regulator to go into bypass mode * * @regulator: Regulator to configure * @enable: enable or disable bypass mode * * Allow the regulator to go into bypass mode if all other consumers * for the regulator also enable bypass mode and the machine * constraints allow this. Bypass mode means that the regulator is * simply passing the input directly to the output with no regulation. 
*/ int regulator_allow_bypass(struct regulator *regulator, bool enable) { struct regulator_dev *rdev = regulator->rdev; int ret = 0; if (!rdev->desc->ops->set_bypass) return 0; if (rdev->constraints && !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS)) return 0; mutex_lock(&rdev->mutex); if (enable && !regulator->bypass) { rdev->bypass_count++; if (rdev->bypass_count == rdev->open_count) { ret = rdev->desc->ops->set_bypass(rdev, enable); if (ret != 0) rdev->bypass_count--; } } else if (!enable && regulator->bypass) { rdev->bypass_count--; if (rdev->bypass_count != rdev->open_count) { ret = rdev->desc->ops->set_bypass(rdev, enable); if (ret != 0) rdev->bypass_count++; } } if (ret == 0) regulator->bypass = enable; mutex_unlock(&rdev->mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_allow_bypass); /** * regulator_register_notifier - register regulator event notifier * @regulator: regulator source * @nb: notifier block * * Register notifier block to receive regulator events. */ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_register(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_register_notifier); /** * regulator_unregister_notifier - unregister regulator event notifier * @regulator: regulator source * @nb: notifier block * * Unregister regulator event notifier block. */ int regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb) { return blocking_notifier_chain_unregister(&regulator->rdev->notifier, nb); } EXPORT_SYMBOL_GPL(regulator_unregister_notifier); /* notify regulator consumers and downstream regulator consumers. * Note mutex must be held by caller. 
*/ static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ return blocking_notifier_call_chain(&rdev->notifier, event, data); } /** * regulator_bulk_get - get multiple regulator consumers * * @dev: Device to supply * @num_consumers: Number of consumers to register * @consumers: Configuration of consumers; clients are stored here. * * @return 0 on success, an errno on failure. * * This helper function allows drivers to get several regulator * consumers in one operation. If any of the regulators cannot be * acquired then any regulators that were allocated will be freed * before returning to the caller. */ int regulator_bulk_get(struct device *dev, int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].consumer = NULL; for (i = 0; i < num_consumers; i++) { consumers[i].consumer = regulator_get(dev, consumers[i].supply); if (IS_ERR(consumers[i].consumer)) { ret = PTR_ERR(consumers[i].consumer); dev_err(dev, "Failed to get supply '%s': %d\n", consumers[i].supply, ret); consumers[i].consumer = NULL; goto err; } } return 0; err: while (--i >= 0) regulator_put(consumers[i].consumer); return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_get); static void regulator_bulk_enable_async(void *data, async_cookie_t cookie) { struct regulator_bulk_data *bulk = data; bulk->ret = regulator_enable(bulk->consumer); } /** * regulator_bulk_enable - enable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to enable multiple regulator * clients in a single API call. If any consumers cannot be enabled * then any others that were enabled will be disabled again prior to * return. 
*/ int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { ASYNC_DOMAIN_EXCLUSIVE(async_domain); int i; int ret = 0; for (i = 0; i < num_consumers; i++) { if (consumers[i].consumer->always_on) consumers[i].ret = 0; else async_schedule_domain(regulator_bulk_enable_async, &consumers[i], &async_domain); } async_synchronize_full_domain(&async_domain); /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto err; } } return 0; err: for (i = 0; i < num_consumers; i++) { if (consumers[i].ret < 0) pr_err("Failed to enable %s: %d\n", consumers[i].supply, consumers[i].ret); else regulator_disable(consumers[i].consumer); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_enable); /** * regulator_bulk_disable - disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * @return 0 on success, an errno on failure * * This convenience API allows consumers to disable multiple regulator * clients in a single API call. If any consumers cannot be disabled * then any others that were disabled will be enabled again prior to * return. */ int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret, r; for (i = num_consumers - 1; i >= 0; --i) { ret = regulator_disable(consumers[i].consumer); if (ret != 0) goto err; } return 0; err: pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret); for (++i; i < num_consumers; ++i) { r = regulator_enable(consumers[i].consumer); if (r != 0) pr_err("Failed to reename %s: %d\n", consumers[i].supply, r); } return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_disable); /** * regulator_bulk_force_disable - force disable multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. 
* @return 0 on success, an errno on failure * * This convenience API allows consumers to forcibly disable multiple regulator * clients in a single API call. * NOTE: This should be used for situations when device damage will * likely occur if the regulators are not disabled (e.g. over temp). * Although regulator_force_disable function call for some consumers can * return error numbers, the function is called for all consumers. */ int regulator_bulk_force_disable(int num_consumers, struct regulator_bulk_data *consumers) { int i; int ret; for (i = 0; i < num_consumers; i++) consumers[i].ret = regulator_force_disable(consumers[i].consumer); for (i = 0; i < num_consumers; i++) { if (consumers[i].ret != 0) { ret = consumers[i].ret; goto out; } } return 0; out: return ret; } EXPORT_SYMBOL_GPL(regulator_bulk_force_disable); /** * regulator_bulk_free - free multiple regulator consumers * * @num_consumers: Number of consumers * @consumers: Consumer data; clients are stored here. * * This convenience API allows consumers to free multiple regulator * clients in a single API call. */ void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers) { int i; for (i = 0; i < num_consumers; i++) { regulator_put(consumers[i].consumer); consumers[i].consumer = NULL; } } EXPORT_SYMBOL_GPL(regulator_bulk_free); /** * regulator_notifier_call_chain - call regulator event notifier * @rdev: regulator source * @event: notifier block * @data: callback-specific data. * * Called by regulator drivers to notify clients a regulator event has * occurred. We also notify regulator clients downstream. * Note lock must be held by caller. 
*/ int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { _notifier_call_chain(rdev, event, data); return NOTIFY_DONE; } EXPORT_SYMBOL_GPL(regulator_notifier_call_chain); /** * regulator_mode_to_status - convert a regulator mode into a status * * @mode: Mode to convert * * Convert a regulator mode into a status. */ int regulator_mode_to_status(unsigned int mode) { switch (mode) { case REGULATOR_MODE_FAST: return REGULATOR_STATUS_FAST; case REGULATOR_MODE_NORMAL: return REGULATOR_STATUS_NORMAL; case REGULATOR_MODE_IDLE: return REGULATOR_STATUS_IDLE; case REGULATOR_MODE_STANDBY: return REGULATOR_STATUS_STANDBY; default: return REGULATOR_STATUS_UNDEFINED; } } EXPORT_SYMBOL_GPL(regulator_mode_to_status); /* * To avoid cluttering sysfs (and memory) with useless state, only * create attributes that can be meaningfully displayed. */ static int add_regulator_attributes(struct regulator_dev *rdev) { struct device *dev = &rdev->dev; const struct regulator_ops *ops = rdev->desc->ops; int status = 0; /* some attributes need specific methods to be displayed */ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { status = device_create_file(dev, &dev_attr_microvolts); if (status < 0) return status; } if (ops->get_current_limit) { status = device_create_file(dev, &dev_attr_microamps); if (status < 0) return status; } if (ops->get_mode) { status = device_create_file(dev, &dev_attr_opmode); if (status < 0) return status; } if (rdev->ena_pin || ops->is_enabled) { status = device_create_file(dev, &dev_attr_state); if (status < 0) return status; } if (ops->get_status) { status = device_create_file(dev, &dev_attr_status); if (status < 0) return status; } if (ops->get_bypass) { status = device_create_file(dev, &dev_attr_bypass); if (status < 0) return status; } /* 
some attributes are type-specific */ if (rdev->desc->type == REGULATOR_CURRENT) { status = device_create_file(dev, &dev_attr_requested_microamps); if (status < 0) return status; } /* all the other attributes exist to support constraints; * don't show them if there are no constraints, or if the * relevant supporting methods are missing. */ if (!rdev->constraints) return status; /* constraints need specific supporting methods */ if (ops->set_voltage || ops->set_voltage_sel) { status = device_create_file(dev, &dev_attr_min_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_max_microvolts); if (status < 0) return status; } if (ops->set_current_limit) { status = device_create_file(dev, &dev_attr_min_microamps); if (status < 0) return status; status = device_create_file(dev, &dev_attr_max_microamps); if (status < 0) return status; } status = device_create_file(dev, &dev_attr_suspend_standby_state); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_state); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_state); if (status < 0) return status; if (ops->set_suspend_voltage) { status = device_create_file(dev, &dev_attr_suspend_standby_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_microvolts); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_microvolts); if (status < 0) return status; } if (ops->set_suspend_mode) { status = device_create_file(dev, &dev_attr_suspend_standby_mode); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_mem_mode); if (status < 0) return status; status = device_create_file(dev, &dev_attr_suspend_disk_mode); if (status < 0) return status; } return status; } static void rdev_init_debugfs(struct regulator_dev *rdev) { rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); if (!rdev->debugfs) { rdev_warn(rdev, 
"Failed to create debugfs directory\n"); return; } debugfs_create_u32("use_count", 0444, rdev->debugfs, &rdev->use_count); debugfs_create_u32("open_count", 0444, rdev->debugfs, &rdev->open_count); debugfs_create_u32("bypass_count", 0444, rdev->debugfs, &rdev->bypass_count); } /** * regulator_register - register regulator * @regulator_desc: regulator to register * @config: runtime configuration for regulator * * Called by regulator drivers to register a regulator. * Returns a valid pointer to struct regulator_dev on success * or an ERR_PTR() on error. */ struct regulator_dev * regulator_register(const struct regulator_desc *regulator_desc, const struct regulator_config *config) { const struct regulation_constraints *constraints = NULL; const struct regulator_init_data *init_data; static atomic_t regulator_no = ATOMIC_INIT(0); struct regulator_dev *rdev; struct device *dev; int ret, i; const char *supply = NULL; if (regulator_desc == NULL || config == NULL) return ERR_PTR(-EINVAL); dev = config->dev; WARN_ON(!dev); if (regulator_desc->name == NULL || regulator_desc->ops == NULL) return ERR_PTR(-EINVAL); if (regulator_desc->type != REGULATOR_VOLTAGE && regulator_desc->type != REGULATOR_CURRENT) return ERR_PTR(-EINVAL); /* Only one of each should be implemented */ WARN_ON(regulator_desc->ops->get_voltage && regulator_desc->ops->get_voltage_sel); WARN_ON(regulator_desc->ops->set_voltage && regulator_desc->ops->set_voltage_sel); /* If we're using selectors we must implement list_voltage. 
*/ if (regulator_desc->ops->get_voltage_sel && !regulator_desc->ops->list_voltage) { return ERR_PTR(-EINVAL); } if (regulator_desc->ops->set_voltage_sel && !regulator_desc->ops->list_voltage) { return ERR_PTR(-EINVAL); } rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL); if (rdev == NULL) return ERR_PTR(-ENOMEM); init_data = regulator_of_get_init_data(dev, regulator_desc, &rdev->dev.of_node); if (!init_data) { init_data = config->init_data; rdev->dev.of_node = of_node_get(config->of_node); } mutex_lock(&regulator_list_mutex); mutex_init(&rdev->mutex); rdev->reg_data = config->driver_data; rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; if (config->regmap) rdev->regmap = config->regmap; else if (dev_get_regmap(dev, NULL)) rdev->regmap = dev_get_regmap(dev, NULL); else if (dev->parent) rdev->regmap = dev_get_regmap(dev->parent, NULL); INIT_LIST_HEAD(&rdev->consumer_list); INIT_LIST_HEAD(&rdev->list); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work); /* preform any regulator specific init */ if (init_data && init_data->regulator_init) { ret = init_data->regulator_init(rdev->reg_data); if (ret < 0) goto clean; } /* register with sysfs */ rdev->dev.class = &regulator_class; rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%d", atomic_inc_return(&regulator_no) - 1); ret = device_register(&rdev->dev); if (ret != 0) { put_device(&rdev->dev); goto clean; } dev_set_drvdata(&rdev->dev, rdev); if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { ret = regulator_ena_gpio_request(rdev, config); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", config->ena_gpio, ret); goto wash; } if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) rdev->ena_gpio_state = 1; if (config->ena_gpio_invert) rdev->ena_gpio_state = !rdev->ena_gpio_state; } /* set regulator constraints */ if (init_data) constraints = &init_data->constraints; ret = set_machine_constraints(rdev, 
constraints); if (ret < 0) goto scrub; /* add attributes supported by this regulator */ ret = add_regulator_attributes(rdev); if (ret < 0) goto scrub; if (init_data && init_data->supply_regulator) supply = init_data->supply_regulator; else if (regulator_desc->supply_name) supply = regulator_desc->supply_name; if (supply) { struct regulator_dev *r; r = regulator_dev_lookup(dev, supply, &ret); if (ret == -ENODEV) { /* * No supply was specified for this regulator and * there will never be one. */ ret = 0; goto add_dev; } else if (!r) { dev_err(dev, "Failed to find supply %s\n", supply); ret = -EPROBE_DEFER; goto scrub; } ret = set_supply(rdev, r); if (ret < 0) goto scrub; /* Enable supply if rail is enabled */ if (_regulator_is_enabled(rdev)) { ret = regulator_enable(rdev->supply); if (ret < 0) goto scrub; } } add_dev: /* add consumers devices */ if (init_data) { for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } } list_add(&rdev->list, &regulator_list); rdev_init_debugfs(rdev); out: mutex_unlock(&regulator_list_mutex); return rdev; unset_supplies: unset_regulator_supplies(rdev); scrub: if (rdev->supply) _regulator_put(rdev->supply); regulator_ena_gpio_free(rdev); kfree(rdev->constraints); wash: device_unregister(&rdev->dev); /* device core frees rdev */ rdev = ERR_PTR(ret); goto out; clean: kfree(rdev); rdev = ERR_PTR(ret); goto out; } EXPORT_SYMBOL_GPL(regulator_register); /** * regulator_unregister - unregister regulator * @rdev: regulator to unregister * * Called by regulator drivers to unregister a regulator. 
*/ void regulator_unregister(struct regulator_dev *rdev) { if (rdev == NULL) return; if (rdev->supply) { while (rdev->use_count--) regulator_disable(rdev->supply); regulator_put(rdev->supply); } mutex_lock(&regulator_list_mutex); debugfs_remove_recursive(rdev->debugfs); flush_work(&rdev->disable_work.work); WARN_ON(rdev->open_count); unset_regulator_supplies(rdev); list_del(&rdev->list); kfree(rdev->constraints); regulator_ena_gpio_free(rdev); of_node_put(rdev->dev.of_node); device_unregister(&rdev->dev); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_unregister); /** * regulator_suspend_prepare - prepare regulators for system wide suspend * @state: system suspend state * * Configure each regulator with it's suspend operating parameters for state. * This will usually be called by machine suspend code prior to supending. */ int regulator_suspend_prepare(suspend_state_t state) { struct regulator_dev *rdev; int ret = 0; /* ON is handled by regulator active state */ if (state == PM_SUSPEND_ON) return -EINVAL; mutex_lock(&regulator_list_mutex); list_for_each_entry(rdev, &regulator_list, list) { mutex_lock(&rdev->mutex); ret = suspend_prepare(rdev, state); mutex_unlock(&rdev->mutex); if (ret < 0) { rdev_err(rdev, "failed to prepare\n"); goto out; } } out: mutex_unlock(&regulator_list_mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_suspend_prepare); /** * regulator_suspend_finish - resume regulators from system wide suspend * * Turn on regulators that might be turned off by regulator_suspend_prepare * and that should be turned on according to the regulators properties. 
*/ int regulator_suspend_finish(void) { struct regulator_dev *rdev; int ret = 0, error; mutex_lock(&regulator_list_mutex); list_for_each_entry(rdev, &regulator_list, list) { mutex_lock(&rdev->mutex); if (rdev->use_count > 0 || rdev->constraints->always_on) { error = _regulator_do_enable(rdev); if (error) ret = error; } else { if (!have_full_constraints()) goto unlock; if (!_regulator_is_enabled(rdev)) goto unlock; error = _regulator_do_disable(rdev); if (error) ret = error; } unlock: mutex_unlock(&rdev->mutex); } mutex_unlock(&regulator_list_mutex); return ret; } EXPORT_SYMBOL_GPL(regulator_suspend_finish); /** * regulator_has_full_constraints - the system has fully specified constraints * * Calling this function will cause the regulator API to disable all * regulators which have a zero use count and don't have an always_on * constraint in a late_initcall. * * The intention is that this will become the default behaviour in a * future kernel release so users are encouraged to use this facility * now. */ void regulator_has_full_constraints(void) { has_full_constraints = 1; } EXPORT_SYMBOL_GPL(regulator_has_full_constraints); /** * rdev_get_drvdata - get rdev regulator driver data * @rdev: regulator * * Get rdev regulator driver private data. This call can be used in the * regulator driver context. */ void *rdev_get_drvdata(struct regulator_dev *rdev) { return rdev->reg_data; } EXPORT_SYMBOL_GPL(rdev_get_drvdata); /** * regulator_get_drvdata - get regulator driver data * @regulator: regulator * * Get regulator driver private data. This call can be used in the consumer * driver context when non API regulator specific functions need to be called. 
*/ void *regulator_get_drvdata(struct regulator *regulator) { return regulator->rdev->reg_data; } EXPORT_SYMBOL_GPL(regulator_get_drvdata); /** * regulator_set_drvdata - set regulator driver data * @regulator: regulator * @data: data */ void regulator_set_drvdata(struct regulator *regulator, void *data) { regulator->rdev->reg_data = data; } EXPORT_SYMBOL_GPL(regulator_set_drvdata); /** * regulator_get_id - get regulator ID * @rdev: regulator */ int rdev_get_id(struct regulator_dev *rdev) { return rdev->desc->id; } EXPORT_SYMBOL_GPL(rdev_get_id); struct device *rdev_get_dev(struct regulator_dev *rdev) { return &rdev->dev; } EXPORT_SYMBOL_GPL(rdev_get_dev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data) { return reg_init_data->driver_data; } EXPORT_SYMBOL_GPL(regulator_get_init_drvdata); #ifdef CONFIG_DEBUG_FS static ssize_t supply_map_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); ssize_t len, ret = 0; struct regulator_map *map; if (!buf) return -ENOMEM; list_for_each_entry(map, &regulator_map_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s -> %s.%s\n", rdev_get_name(map->regulator), map->dev_name, map->supply); if (len >= 0) ret += len; if (ret > PAGE_SIZE) { ret = PAGE_SIZE; break; } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } #endif static const struct file_operations supply_map_fops = { #ifdef CONFIG_DEBUG_FS .read = supply_map_read_file, .llseek = default_llseek, #endif }; static int __init regulator_init(void) { int ret; ret = class_register(&regulator_class); debugfs_root = debugfs_create_dir("regulator", NULL); if (!debugfs_root) pr_warn("regulator: Failed to create debugfs directory\n"); debugfs_create_file("supply_map", 0444, debugfs_root, NULL, &supply_map_fops); regulator_dummy_init(); return ret; } /* init early to allow our consumers to complete system booting */ 
core_initcall(regulator_init); static int __init regulator_init_complete(void) { struct regulator_dev *rdev; const struct regulator_ops *ops; struct regulation_constraints *c; int enabled, ret; /* * Since DT doesn't provide an idiomatic mechanism for * enabling full constraints and since it's much more natural * with DT to provide them just assume that a DT enabled * system has full constraints. */ if (of_have_populated_dt()) has_full_constraints = true; mutex_lock(&regulator_list_mutex); /* If we have a full configuration then disable any regulators * we have permission to change the status for and which are * not in use or always_on. This is effectively the default * for DT and ACPI as they have full constraints. */ list_for_each_entry(rdev, &regulator_list, list) { ops = rdev->desc->ops; c = rdev->constraints; if (c && c->always_on) continue; if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS)) continue; mutex_lock(&rdev->mutex); if (rdev->use_count) goto unlock; /* If we can't read the status assume it's on. */ if (ops->is_enabled) enabled = ops->is_enabled(rdev); else enabled = 1; if (!enabled) goto unlock; if (have_full_constraints()) { /* We log since this may kill the system if it * goes wrong. */ rdev_info(rdev, "disabling\n"); ret = _regulator_do_disable(rdev); if (ret != 0) rdev_err(rdev, "couldn't disable: %d\n", ret); } else { /* The intention is that in future we will * assume that full constraints are provided * so warn even if we aren't going to do * anything here. */ rdev_warn(rdev, "incomplete constraints, leaving on\n"); } unlock: mutex_unlock(&rdev->mutex); } mutex_unlock(&regulator_list_mutex); return 0; } late_initcall_sync(regulator_init_complete);
./CrossVul/dataset_final_sorted/CWE-416/c/bad_2440_0
crossvul-cpp_data_good_388_5
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 *
 * Per-task cache of the most recently used VMAs, avoiding the mm rbtree
 * walk in find_vma() for lookups with spatial locality.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <asm/pgtable.h>

/*
 * Hash based on the pmd of addr if configured with MMU, which provides a good
 * hit rate for workloads with spatial locality.  Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
/* Map an address to one of the VMACACHE_SIZE cache slots. */
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

/*
 * Record @newvma in the slot hashed from @addr, but only when the cache
 * (current's) actually belongs to @newvma's mm.
 */
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

/*
 * Check whether current's cached entries may be used for @mm.  When the
 * mm's sequence number no longer matches the task's snapshot, the cache is
 * stale: resynchronize the snapshot, flush the entries, and report a miss.
 */
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

/*
 * Look up a cached VMA containing @addr.  Starts at the hashed slot and
 * probes all slots in order (wrapping), so a hit placed in another slot is
 * still found.  Returns NULL on a cache miss.
 */
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
#ifdef CONFIG_DEBUG_VM_VMACACHE
			/* Cached VMA from a different mm indicates a bug. */
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			/* [vm_start, vm_end) half-open containment test. */
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

#ifndef CONFIG_MMU
/*
 * nommu variant: look up a cached VMA whose bounds match [start, end)
 * exactly, rather than one merely containing an address.
 */
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif
./CrossVul/dataset_final_sorted/CWE-416/c/good_388_5
crossvul-cpp_data_bad_5334_0
/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimmick getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done ala:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

/*
 * Apply @ioprio to @task's io_context, subject to credential and LSM
 * checks.  Returns 0 on success or a negative errno (-EPERM when the
 * caller may not touch @task).
 */
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	/* get_task_io_context() takes a reference, keeping ioc alive. */
	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	/* Validate the (class, data) pair before touching any task. */
	switch (class) {
	case IOPRIO_CLASS_RT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* fall through, rt has prio field too */
	case IOPRIO_CLASS_BE:
		if (data >= IOPRIO_BE_NR || data < 0)
			return -EINVAL;
		break;
	case IOPRIO_CLASS_IDLE:
		break;
	case IOPRIO_CLASS_NONE:
		if (data)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = set_task_ioprio(p, ioprio);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				break;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		uid = make_kuid(current_user_ns(), who);
		if (!uid_valid(uid))
			break;
		if (!who)
			user = current_user();
		else
			user = find_user(uid);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (!uid_eq(task_uid(p), uid) ||
			    !task_pid_vnr(p))
				continue;
			ret = set_task_ioprio(p, ioprio);
			if (ret)
				goto free_uid;
		} while_each_thread(g, p);
free_uid:
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Read @p's current io priority, or IOPRIO_CLASS_NONE/IOPRIO_NORM when it
 * has no io_context.  Returns a negative errno if the LSM denies access.
 */
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	/*
	 * p->io_context is cleared and freed when the task exits
	 * (exit_io_context() runs under task_lock()).  Dereferencing it
	 * without task_lock() raced with exit and could read freed memory
	 * (use-after-free, CVE-2016-7911); hold task_lock() across the
	 * access so the pointer stays stable.
	 */
	task_lock(p);
	if (p->io_context)
		ret = p->io_context->ioprio;
	task_unlock(p);
out:
	return ret;
}

/*
 * Merge two io priorities: invalid values default to BE/NORM; within the
 * same class the numerically smaller (higher) priority wins, otherwise the
 * stronger class wins.
 */
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass;
	unsigned short bclass;

	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	aclass = IOPRIO_PRIO_CLASS(aprio);
	bclass = IOPRIO_PRIO_CLASS(bprio);
	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = get_task_ioprio(p);
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		uid = make_kuid(current_user_ns(), who);
		/* Reject unmappable uids up front, mirroring ioprio_set(). */
		if (!uid_valid(uid))
			break;
		if (!who)
			user = current_user();
		else
			user = find_user(uid);

		if (!user)
			break;

		do_each_thread(g, p) {
			if (!uid_eq(task_uid(p), user->uid) ||
			    !task_pid_vnr(p))
				continue;
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_thread(g, p);

		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_5334_0
crossvul-cpp_data_good_820_2
/* * "splice": joining two ropes together by interweaving their strands. * * This is the "extended pipe" functionality, where a pipe is used as * an arbitrary in-memory buffer. Think of a pipe as a small kernel * buffer that you can use to transfer data from one end to the other. * * The traditional unix read/write is extended with a "splice()" operation * that transfers data buffers to or from a pipe buffer. * * Named by Larry McVoy, original implementation from Linus, extended by * Jens to support splicing to files, network, direct splicing, etc and * fixing lots of bugs. * * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk> * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org> * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu> * */ #include <linux/bvec.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/splice.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/export.h> #include <linux/syscalls.h> #include <linux/uio.h> #include <linux/security.h> #include <linux/gfp.h> #include <linux/socket.h> #include <linux/compat.h> #include <linux/sched/signal.h> #include "internal.h" /* * Attempt to steal a page from a pipe buffer. This should perhaps go into * a vm helper function, it's already simplified quite a bit by the * addition of remove_mapping(). If success is returned, the caller may * attempt to reuse this page for another destination. */ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; struct address_space *mapping; lock_page(page); mapping = page_mapping(page); if (mapping) { WARN_ON(!PageUptodate(page)); /* * At least for ext2 with nobh option, we need to wait on * writeback completing on this page, since we'll remove it * from the pagecache. 
Otherwise truncate wont wait on the * page, allowing the disk blocks to be reused by someone else * before we actually wrote our data to them. fs corruption * ensues. */ wait_on_page_writeback(page); if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) goto out_unlock; /* * If we succeeded in removing the mapping, set LRU flag * and return good. */ if (remove_mapping(mapping, page)) { buf->flags |= PIPE_BUF_FLAG_LRU; return 0; } } /* * Raced with truncate or failed to remove page from current * address space, unlock and return failure. */ out_unlock: unlock_page(page); return 1; } static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { put_page(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } /* * Check whether the contents of buf is OK to access. Since the content * is a page cache page, IO may be in flight. */ static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; int err; if (!PageUptodate(page)) { lock_page(page); /* * Page got truncated/unhashed. This will cause a 0-byte * splice, if this is the first page. */ if (!page->mapping) { err = -ENODATA; goto error; } /* * Uh oh, read-error from disk. */ if (!PageUptodate(page)) { err = -EIO; goto error; } /* * Page is ok afterall, we are done. 
*/ unlock_page(page); } return 0; error: unlock_page(page); return err; } const struct pipe_buf_operations page_cache_pipe_buf_ops = { .confirm = page_cache_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = page_cache_pipe_buf_steal, .get = generic_pipe_buf_get, }; static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) return 1; buf->flags |= PIPE_BUF_FLAG_LRU; return generic_pipe_buf_steal(pipe, buf); } static const struct pipe_buf_operations user_page_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .steal = user_page_pipe_buf_steal, .get = generic_pipe_buf_get, }; static void wakeup_pipe_readers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } /** * splice_to_pipe - fill passed data into a pipe * @pipe: pipe to fill * @spd: data to fill * * Description: * @spd contains a map of pages and len/offset tuples, along with * the struct pipe_buf_operations associated with these pages. This * function will link that data to the pipe. 
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	/* Remember the original count: pages not consumed below must be
	 * released through spd->spd_release() before returning. */
	unsigned int spd_pages = spd->nr_pages;
	int ret = 0, page_nr = 0;

	if (!spd_pages)
		return 0;

	/* No readers left: raise SIGPIPE, as for a write to a broken pipe. */
	if (unlikely(!pipe->readers)) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* Fill free pipe slots with the supplied pages, one buffer each. */
	while (pipe->nrbufs < pipe->buffers) {
		/* Ring index of the next free slot (buffers is a power of 2). */
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = spd->pages[page_nr];
		buf->offset = spd->partial[page_nr].offset;
		buf->len = spd->partial[page_nr].len;
		buf->private = spd->partial[page_nr].private;
		buf->ops = spd->ops;
		buf->flags = 0;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (!--spd->nr_pages)
			break;
	}

	/* Pipe was already full: nothing spliced. */
	if (!ret)
		ret = -EAGAIN;

out:
	/* Drop any pages that did not make it into the pipe. */
	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}
EXPORT_SYMBOL_GPL(splice_to_pipe);

/*
 * Append a single pre-built pipe_buffer to @pipe.  On success the buffer's
 * contents are copied into the ring and its length returned; on failure
 * (-EPIPE when there are no readers, -EAGAIN when the pipe is full) the
 * buffer is released via pipe_buf_release().
 */
ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	int ret;

	if (unlikely(!pipe->readers)) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
	} else if (pipe->nrbufs == pipe->buffers) {
		ret = -EAGAIN;
	} else {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		pipe->bufs[newbuf] = *buf;
		pipe->nrbufs++;
		return buf->len;
	}
	pipe_buf_release(pipe, buf);
	return ret;
}
EXPORT_SYMBOL(add_to_pipe);

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
*/ int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int buffers = READ_ONCE(pipe->buffers); spd->nr_pages_max = buffers; if (buffers <= PIPE_DEF_BUFFERS) return 0; spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL); spd->partial = kmalloc_array(buffers, sizeof(struct partial_page), GFP_KERNEL); if (spd->pages && spd->partial) return 0; kfree(spd->pages); kfree(spd->partial); return -ENOMEM; } void splice_shrink_spd(struct splice_pipe_desc *spd) { if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) return; kfree(spd->pages); kfree(spd->partial); } /** * generic_file_splice_read - splice data from file to a pipe * @in: file to splice from * @ppos: position in @in * @pipe: pipe to splice to * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will read pages from given file and fill them into a pipe. Can be * used as long as it has more or less sane ->read_iter(). * */ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct iov_iter to; struct kiocb kiocb; int idx, ret; iov_iter_pipe(&to, READ, pipe, len); idx = to.idx; init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; ret = call_read_iter(in, &kiocb, &to); if (ret > 0) { *ppos = kiocb.ki_pos; file_accessed(in); } else if (ret < 0) { to.idx = idx; to.iov_offset = 0; iov_iter_advance(&to, 0); /* to free what was emitted */ /* * callers of ->splice_read() expect -EAGAIN on * "can't put anything in there", rather than -EFAULT. 
*/ if (ret == -EFAULT) ret = -EAGAIN; } return ret; } EXPORT_SYMBOL(generic_file_splice_read); const struct pipe_buf_operations default_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return 1; } /* Pipe buffer operations for a socket and similar. */ const struct pipe_buf_operations nosteal_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_nosteal, .get = generic_pipe_buf_get, }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); static ssize_t kernel_readv(struct file *file, const struct kvec *vec, unsigned long vlen, loff_t offset) { mm_segment_t old_fs; loff_t pos = offset; ssize_t res; old_fs = get_fs(); set_fs(KERNEL_DS); /* The cast to a user pointer is valid due to the set_fs() */ res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos, 0); set_fs(old_fs); return res; } static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct kvec *vec, __vec[PIPE_DEF_BUFFERS]; struct iov_iter to; struct page **pages; unsigned int nr_pages; size_t offset, base, copied = 0; ssize_t res; int i; if (pipe->nrbufs == pipe->buffers) return -EAGAIN; /* * Try to keep page boundaries matching to source pagecache ones - * it probably won't be much help, but... 
*/ offset = *ppos & ~PAGE_MASK; iov_iter_pipe(&to, READ, pipe, len + offset); res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base); if (res <= 0) return -ENOMEM; nr_pages = DIV_ROUND_UP(res + base, PAGE_SIZE); vec = __vec; if (nr_pages > PIPE_DEF_BUFFERS) { vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL); if (unlikely(!vec)) { res = -ENOMEM; goto out; } } pipe->bufs[to.idx].offset = offset; pipe->bufs[to.idx].len -= offset; for (i = 0; i < nr_pages; i++) { size_t this_len = min_t(size_t, len, PAGE_SIZE - offset); vec[i].iov_base = page_address(pages[i]) + offset; vec[i].iov_len = this_len; len -= this_len; offset = 0; } res = kernel_readv(in, vec, nr_pages, *ppos); if (res > 0) { copied = res; *ppos += res; } if (vec != __vec) kfree(vec); out: for (i = 0; i < nr_pages; i++) put_page(pages[i]); kvfree(pages); iov_iter_advance(&to, copied); /* truncates and discards */ return res; } /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' * using sendpage(). Return the number of bytes sent. */ static int pipe_to_sendpage(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct file *file = sd->u.file; loff_t pos = sd->pos; int more; if (!likely(file->f_op->sendpage)) return -EINVAL; more = (sd->flags & SPLICE_F_MORE) ? 
MSG_MORE : 0; if (sd->len < sd->total_len && pipe->nrbufs > 1) more |= MSG_SENDPAGE_NOTLAST; return file->f_op->sendpage(file, buf->page, buf->offset, sd->len, &pos, more); } static void wakeup_pipe_writers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } /** * splice_from_pipe_feed - feed available data from a pipe to a file * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function loops over the pipe and calls @actor to do the * actual moving of a single struct pipe_buffer to the desired * destination. It returns when there's no more buffers left in * the pipe or if the requested number of bytes (@sd->total_len) * have been copied. It returns a positive number (one) if the * pipe needs to be filled with more data, zero if the required * number of bytes have been copied and -errno on error. * * This, together with splice_from_pipe_{begin,end,next}, may be * used to implement the functionality of __splice_from_pipe() when * locking is required around copying the pipe buffers to the * destination. 
*/ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; while (pipe->nrbufs) { struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; sd->len = buf->len; if (sd->len > sd->total_len) sd->len = sd->total_len; ret = pipe_buf_confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } ret = actor(pipe, buf, sd); if (ret <= 0) return ret; buf->offset += ret; buf->len -= ret; sd->num_spliced += ret; sd->len -= ret; sd->pos += ret; sd->total_len -= ret; if (!buf->len) { pipe_buf_release(pipe, buf); pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; if (pipe->files) sd->need_wakeup = true; } if (!sd->total_len) return 0; } return 1; } /** * splice_from_pipe_next - wait for some data to splice from * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wait for some data and return a positive * value (one) if pipe buffers are available. It will return zero * or -errno if no more data needs to be spliced. */ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { /* * Check for signal early to make process killable when there are * always buffers available */ if (signal_pending(current)) return -ERESTARTSYS; while (!pipe->nrbufs) { if (!pipe->writers) return 0; if (!pipe->waiting_writers && sd->num_spliced) return 0; if (sd->flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; if (sd->need_wakeup) { wakeup_pipe_writers(pipe); sd->need_wakeup = false; } pipe_wait(pipe); } return 1; } /** * splice_from_pipe_begin - start splicing from pipe * @sd: information about the splice operation * * Description: * This function should be called before a loop containing * splice_from_pipe_next() and splice_from_pipe_feed() to * initialize the necessary fields of @sd. 
*/ static void splice_from_pipe_begin(struct splice_desc *sd) { sd->num_spliced = 0; sd->need_wakeup = false; } /** * splice_from_pipe_end - finish splicing from pipe * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wake up pipe writers if necessary. It should * be called after a loop containing splice_from_pipe_next() and * splice_from_pipe_feed(). */ static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd) { if (sd->need_wakeup) wakeup_pipe_writers(pipe); } /** * __splice_from_pipe - splice data from a pipe to given actor * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function does little more than loop over the pipe and call * @actor to do the actual moving of a single struct pipe_buffer to * the desired destination. See pipe_to_file, pipe_to_sendpage, or * pipe_to_user. * */ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; splice_from_pipe_begin(sd); do { cond_resched(); ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); } while (ret > 0); splice_from_pipe_end(pipe, sd); return sd->num_spliced ? sd->num_spliced : ret; } EXPORT_SYMBOL(__splice_from_pipe); /** * splice_from_pipe - splice data from a pipe to a file * @pipe: pipe to splice from * @out: file to splice to * @ppos: position in @out * @len: how many bytes to splice * @flags: splice modifier flags * @actor: handler that splices the data * * Description: * See __splice_from_pipe. This function locks the pipe inode, * otherwise it's identical to __splice_from_pipe(). 
* */ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, splice_actor *actor) { ssize_t ret; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, actor); pipe_unlock(pipe); return ret; } /** * iter_file_splice_write - splice data from a pipe to a file * @pipe: pipe info * @out: file to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will either move or copy pages (determined by @flags options) from * the given pipe inode to the given file. * This one is ->write_iter-based. * */ ssize_t iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; int nbufs = pipe->buffers; struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL); ssize_t ret; if (unlikely(!array)) return -ENOMEM; pipe_lock(pipe); splice_from_pipe_begin(&sd); while (sd.total_len) { struct iov_iter from; size_t left; int n, idx; ret = splice_from_pipe_next(pipe, &sd); if (ret <= 0) break; if (unlikely(nbufs < pipe->buffers)) { kfree(array); nbufs = pipe->buffers; array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL); if (!array) { ret = -ENOMEM; break; } } /* build the vector */ left = sd.total_len; for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) { struct pipe_buffer *buf = pipe->bufs + idx; size_t this_len = buf->len; if (this_len > left) this_len = left; if (idx == pipe->buffers - 1) idx = -1; ret = pipe_buf_confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; goto done; } array[n].bv_page = buf->page; array[n].bv_len = this_len; array[n].bv_offset = buf->offset; left -= this_len; } iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left); ret = 
vfs_iter_write(out, &from, &sd.pos, 0); if (ret <= 0) break; sd.num_spliced += ret; sd.total_len -= ret; *ppos = sd.pos; /* dismiss the fully eaten buffers, adjust the partial one */ while (ret) { struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; if (ret >= buf->len) { ret -= buf->len; buf->len = 0; pipe_buf_release(pipe, buf); pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; if (pipe->files) sd.need_wakeup = true; } else { buf->offset += ret; buf->len -= ret; ret = 0; } } } done: kfree(array); splice_from_pipe_end(pipe, &sd); pipe_unlock(pipe); if (sd.num_spliced) ret = sd.num_spliced; return ret; } EXPORT_SYMBOL(iter_file_splice_write); static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int ret; void *data; loff_t tmp = sd->pos; data = kmap(buf->page); ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); kunmap(buf->page); return ret; } static ssize_t default_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t ret; ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf); if (ret > 0) *ppos += ret; return ret; } /** * generic_splice_sendpage - splice data from a pipe to a socket * @pipe: pipe to splice from * @out: socket to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will send @len bytes from the pipe to a network socket. No data copying * is involved. * */ ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); } EXPORT_SYMBOL(generic_splice_sendpage); /* * Attempt to initiate a splice from pipe to file. 
*/ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); if (out->f_op->splice_write) splice_write = out->f_op->splice_write; else splice_write = default_file_splice_write; return splice_write(pipe, out, ppos, len, flags); } /* * Attempt to initiate a splice from a file to a pipe. */ static long do_splice_to(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int ret; if (unlikely(!(in->f_mode & FMODE_READ))) return -EBADF; ret = rw_verify_area(READ, in, ppos, len); if (unlikely(ret < 0)) return ret; if (unlikely(len > MAX_RW_COUNT)) len = MAX_RW_COUNT; if (in->f_op->splice_read) splice_read = in->f_op->splice_read; else splice_read = default_file_splice_read; return splice_read(in, ppos, pipe, len, flags); } /** * splice_direct_to_actor - splices data directly between two non-pipes * @in: file to splice from * @sd: actor information on where to splice to * @actor: handles the data splicing * * Description: * This is a special case helper to splice directly between two * points, without requiring an explicit pipe. Internally an allocated * pipe is cached in the process, and reused during the lifetime of * that process. * */ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, splice_direct_actor *actor) { struct pipe_inode_info *pipe; long ret, bytes; umode_t i_mode; size_t len; int i, flags, more; /* * We require the input being a regular file, as we don't want to * randomly drop data for eg socket -> socket splicing. Use the * piped splicing for that! 
*/ i_mode = file_inode(in)->i_mode; if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) return -EINVAL; /* * neither in nor out is a pipe, setup an internal pipe attached to * 'out' and transfer the wanted data from 'in' to 'out' through that */ pipe = current->splice_pipe; if (unlikely(!pipe)) { pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; /* * We don't have an immediate reader, but we'll read the stuff * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. */ pipe->readers = 1; current->splice_pipe = pipe; } /* * Do the splice. */ ret = 0; bytes = 0; len = sd->total_len; flags = sd->flags; /* * Don't block on output, we have to drain the direct pipe. */ sd->flags &= ~SPLICE_F_NONBLOCK; more = sd->flags & SPLICE_F_MORE; WARN_ON_ONCE(pipe->nrbufs != 0); while (len) { size_t read_len; loff_t pos = sd->pos, prev_pos = pos; /* Don't try to read more the pipe has space for. */ read_len = min_t(size_t, len, (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT); ret = do_splice_to(in, &pos, pipe, read_len, flags); if (unlikely(ret <= 0)) goto out_release; read_len = ret; sd->total_len = read_len; /* * If more data is pending, set SPLICE_F_MORE * If this is the last data and SPLICE_F_MORE was not set * initially, clears it. */ if (read_len < len) sd->flags |= SPLICE_F_MORE; else if (!more) sd->flags &= ~SPLICE_F_MORE; /* * NOTE: nonblocking mode only applies to the input. 
We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: */ ret = actor(pipe, sd); if (unlikely(ret <= 0)) { sd->pos = prev_pos; goto out_release; } bytes += ret; len -= ret; sd->pos = pos; if (ret < read_len) { sd->pos = prev_pos + ret; goto out_release; } } done: pipe->nrbufs = pipe->curbuf = 0; file_accessed(in); return bytes; out_release: /* * If we did an incomplete transfer we must release * the pipe buffers in question: */ for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) pipe_buf_release(pipe, buf); } if (!bytes) bytes = ret; goto done; } EXPORT_SYMBOL(splice_direct_to_actor); static int direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { struct file *file = sd->u.file; return do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags); } /** * do_splice_direct - splices data directly between two files * @in: file to splice from * @ppos: input file offset * @out: file to splice to * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * For use by do_sendfile(). splice can easily emulate sendfile, but * doing it in the application would incur an extra system call * (splice in + splice out, as compared to just sendfile()). So this helper * can splice directly through a process-private pipe. 
* */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, .opos = opos, }; long ret; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, opos, len); if (unlikely(ret < 0)) return ret; ret = splice_direct_to_actor(in, &sd, direct_splice_actor); if (ret > 0) *ppos = sd.pos; return ret; } EXPORT_SYMBOL(do_splice_direct); static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags) { for (;;) { if (unlikely(!pipe->readers)) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (pipe->nrbufs != pipe->buffers) return 0; if (flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } } static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags); /* * Determine where to splice to/from. */ static long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; loff_t offset; long ret; ipipe = get_pipe_info(in); opipe = get_pipe_info(out); if (ipipe && opipe) { if (off_in || off_out) return -ESPIPE; if (!(in->f_mode & FMODE_READ)) return -EBADF; if (!(out->f_mode & FMODE_WRITE)) return -EBADF; /* Splicing to self would be fun, but... 
*/ if (ipipe == opipe) return -EINVAL; if ((in->f_flags | out->f_flags) & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; return splice_pipe_to_pipe(ipipe, opipe, len, flags); } if (ipipe) { if (off_in) return -ESPIPE; if (off_out) { if (!(out->f_mode & FMODE_PWRITE)) return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; } else { offset = out->f_pos; } if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, &offset, len); if (unlikely(ret < 0)) return ret; if (in->f_flags & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; file_start_write(out); ret = do_splice_from(ipipe, out, &offset, len, flags); file_end_write(out); if (!off_out) out->f_pos = offset; else if (copy_to_user(off_out, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; } if (opipe) { if (off_out) return -ESPIPE; if (off_in) { if (!(in->f_mode & FMODE_PREAD)) return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; } else { offset = in->f_pos; } if (out->f_flags & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; pipe_lock(opipe); ret = wait_for_space(opipe, flags); if (!ret) ret = do_splice_to(in, &offset, opipe, len, flags); pipe_unlock(opipe); if (ret > 0) wakeup_pipe_readers(opipe); if (!off_in) in->f_pos = offset; else if (copy_to_user(off_in, &offset, sizeof(loff_t))) ret = -EFAULT; return ret; } return -EINVAL; } static int iter_to_pipe(struct iov_iter *from, struct pipe_inode_info *pipe, unsigned flags) { struct pipe_buffer buf = { .ops = &user_page_pipe_buf_ops, .flags = flags }; size_t total = 0; int ret = 0; bool failed = false; while (iov_iter_count(from) && !failed) { struct page *pages[16]; ssize_t copied; size_t start; int n; copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start); if (copied <= 0) { ret = copied; break; } for (n = 0; copied; n++, start = 0) { int size = min_t(int, copied, PAGE_SIZE - start); if (!failed) { buf.page = pages[n]; buf.offset = 
start; buf.len = size; ret = add_to_pipe(pipe, &buf); if (unlikely(ret < 0)) { failed = true; } else { iov_iter_advance(from, ret); total += ret; } } else { put_page(pages[n]); } copied -= size; } } return total ? total : ret; } static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data); return n == sd->len ? n : -EFAULT; } /* * For lack of a better implementation, implement vmsplice() to userspace * as a simple copy of the pipes pages to the user iov. */ static long vmsplice_to_user(struct file *file, struct iov_iter *iter, unsigned int flags) { struct pipe_inode_info *pipe = get_pipe_info(file); struct splice_desc sd = { .total_len = iov_iter_count(iter), .flags = flags, .u.data = iter }; long ret = 0; if (!pipe) return -EBADF; if (sd.total_len) { pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, pipe_to_user); pipe_unlock(pipe); } return ret; } /* * vmsplice splices a user address range into a pipe. It can be thought of * as splice-from-memory, where the regular splice is splice-from-file (or * to file). In both cases the output is a pipe, naturally. */ static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter, unsigned int flags) { struct pipe_inode_info *pipe; long ret = 0; unsigned buf_flag = 0; if (flags & SPLICE_F_GIFT) buf_flag = PIPE_BUF_FLAG_GIFT; pipe = get_pipe_info(file); if (!pipe) return -EBADF; pipe_lock(pipe); ret = wait_for_space(pipe, flags); if (!ret) ret = iter_to_pipe(iter, pipe, buf_flag); pipe_unlock(pipe); if (ret > 0) wakeup_pipe_readers(pipe); return ret; } static int vmsplice_type(struct fd f, int *type) { if (!f.file) return -EBADF; if (f.file->f_mode & FMODE_WRITE) { *type = WRITE; } else if (f.file->f_mode & FMODE_READ) { *type = READ; } else { fdput(f); return -EBADF; } return 0; } /* * Note that vmsplice only really supports true splicing _from_ user memory * to a pipe, not the other way around. 
Splicing from user memory is a simple * operation that can be supported without any funky alignment restrictions * or nasty vm tricks. We simply map in the user memory and fill them into * a pipe. The reverse isn't quite as easy, though. There are two possible * solutions for that: * * - memcpy() the data internally, at which point we might as well just * do a regular read() on the buffer anyway. * - Lots of nasty vm tricks, that are neither fast nor flexible (it * has restriction limitations on both ends of the pipe). * * Currently we punt and implement it as a normal copy, see pipe_to_user(). * */ static long do_vmsplice(struct file *f, struct iov_iter *iter, unsigned int flags) { if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; if (!iov_iter_count(iter)) return 0; if (iov_iter_rw(iter) == WRITE) return vmsplice_to_pipe(f, iter, flags); else return vmsplice_to_user(f, iter, flags); } SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov, unsigned long, nr_segs, unsigned int, flags) { struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; long error; struct fd f; int type; f = fdget(fd); error = vmsplice_type(f, &type); if (error) return error; error = import_iovec(type, uiov, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter); if (!error) { error = do_vmsplice(f.file, &iter, flags); kfree(iov); } fdput(f); return error; } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32, unsigned int, nr_segs, unsigned int, flags) { struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; long error; struct fd f; int type; f = fdget(fd); error = vmsplice_type(f, &type); if (error) return error; error = compat_import_iovec(type, iov32, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter); if (!error) { error = do_vmsplice(f.file, &iter, flags); kfree(iov); } fdput(f); return error; } #endif SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, int, 
fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { struct fd in, out; long error; if (unlikely(!len)) return 0; if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; error = -EBADF; in = fdget(fd_in); if (in.file) { if (in.file->f_mode & FMODE_READ) { out = fdget(fd_out); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_splice(in.file, off_in, out.file, off_out, len, flags); fdput(out); } } fdput(in); } return error; } /* * Make sure there's data to read. Wait for input if we can, otherwise * return an appropriate error. */ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs) return 0; ret = 0; pipe_lock(pipe); while (!pipe->nrbufs) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!pipe->writers) break; if (!pipe->waiting_writers) { if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } } pipe_wait(pipe); } pipe_unlock(pipe); return ret; } /* * Make sure there's writeable room. Wait for room if we can, otherwise * return an appropriate error. */ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check ->nrbufs without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (pipe->nrbufs < pipe->buffers) return 0; ret = 0; pipe_lock(pipe); while (pipe->nrbufs >= pipe->buffers) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; break; } if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } pipe_unlock(pipe); return ret; } /* * Splice contents of ipipe to opipe. 
*/ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, nbuf; bool input_wakeup = false; retry: ret = ipipe_prep(ipipe, flags); if (ret) return ret; ret = opipe_prep(opipe, flags); if (ret) return ret; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (!ipipe->nrbufs && !ipipe->writers) break; /* * Cannot make any progress, because either the input * pipe is empty or the output pipe is full. */ if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) { /* Already processed some buffers, break */ if (ret) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } /* * We raced with another reader/writer and haven't * managed to process any buffers. A zero return * value means EOF, so retry instead. */ pipe_unlock(ipipe); pipe_unlock(opipe); goto retry; } ibuf = ipipe->bufs + ipipe->curbuf; nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); obuf = opipe->bufs + nbuf; if (len >= ibuf->len) { /* * Simply move the whole buffer from ipipe to opipe */ *obuf = *ibuf; ibuf->ops = NULL; opipe->nrbufs++; ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1); ipipe->nrbufs--; input_wakeup = true; } else { /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ if (!pipe_buf_get(ipipe, ibuf)) { if (ret == 0) ret = -EFAULT; break; } *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. 
*/ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; pipe_buf_mark_unmergeable(obuf); obuf->len = len; opipe->nrbufs++; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } ret += obuf->len; len -= obuf->len; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); if (input_wakeup) wakeup_pipe_writers(ipipe); return ret; } /* * Link contents of ipipe to opipe. */ static int link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; int ret = 0, i = 0, nbuf; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } /* * If we have iterated all input buffers or ran out of * output room, break. */ if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) break; ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1)); nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1); /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ if (!pipe_buf_get(ipipe, ibuf)) { if (ret == 0) ret = -EFAULT; break; } obuf = opipe->bufs + nbuf; *obuf = *ibuf; /* * Don't inherit the gift flag, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; pipe_buf_mark_unmergeable(obuf); if (obuf->len > len) obuf->len = len; opipe->nrbufs++; ret += obuf->len; len -= obuf->len; i++; } while (len); /* * return EAGAIN if we have the potential of some data in the * future, otherwise just return 0 */ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ret = -EAGAIN; pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. 
*/ if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * This is a tee(1) implementation that works on pipes. It doesn't copy * any data, it simply references the 'in' pages on the 'out' pipe. * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in); struct pipe_inode_info *opipe = get_pipe_info(out); int ret = -EINVAL; /* * Duplicate the contents of ipipe to opipe without actually * copying the data. */ if (ipipe && opipe && ipipe != opipe) { if ((in->f_flags | out->f_flags) & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. */ ret = ipipe_prep(ipipe, flags); if (!ret) { ret = opipe_prep(opipe, flags); if (!ret) ret = link_pipe(ipipe, opipe, len, flags); } } return ret; } SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) { struct fd in; int error; if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; if (unlikely(!len)) return 0; error = -EBADF; in = fdget(fdin); if (in.file) { if (in.file->f_mode & FMODE_READ) { struct fd out = fdget(fdout); if (out.file) { if (out.file->f_mode & FMODE_WRITE) error = do_tee(in.file, out.file, len, flags); fdput(out); } } fdput(in); } return error; }
/*
 * Dataset artifact markers (not source code) left over from file
 * concatenation; preserved verbatim inside a comment:
 *   ./CrossVul/dataset_final_sorted/CWE-416/c/good_820_2
 *   crossvul-cpp_data_bad_3242_0
 */
#include "mongoose.h" #ifdef MG_MODULE_LINES #line 1 "mongoose/src/internal.h" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #ifndef CS_MONGOOSE_SRC_INTERNAL_H_ #define CS_MONGOOSE_SRC_INTERNAL_H_ /* Amalgamated: #include "common/mg_mem.h" */ #ifndef MBUF_REALLOC #define MBUF_REALLOC MG_REALLOC #endif #ifndef MBUF_FREE #define MBUF_FREE MG_FREE #endif #define MG_SET_PTRPTR(_ptr, _v) \ do { \ if (_ptr) *(_ptr) = _v; \ } while (0) #ifndef MG_INTERNAL #define MG_INTERNAL static #endif #ifdef PICOTCP #define NO_LIBC #define MG_DISABLE_PFS #endif /* Amalgamated: #include "mongoose/src/net.h" */ /* Amalgamated: #include "mongoose/src/http.h" */ /* Amalgamated: #include "common/cs_dbg.h" */ #define MG_CTL_MSG_MESSAGE_SIZE 8192 /* internals that need to be accessible in unit tests */ MG_INTERNAL struct mg_connection *mg_do_connect(struct mg_connection *nc, int proto, union socket_address *sa); MG_INTERNAL int mg_parse_address(const char *str, union socket_address *sa, int *proto, char *host, size_t host_len); MG_INTERNAL void mg_call(struct mg_connection *nc, mg_event_handler_t ev_handler, void *user_data, int ev, void *ev_data); void mg_forward(struct mg_connection *from, struct mg_connection *to); MG_INTERNAL void mg_add_conn(struct mg_mgr *mgr, struct mg_connection *c); MG_INTERNAL void mg_remove_conn(struct mg_connection *c); MG_INTERNAL struct mg_connection *mg_create_connection( struct mg_mgr *mgr, mg_event_handler_t callback, struct mg_add_sock_opts opts); #ifdef _WIN32 /* Retur value is the same as for MultiByteToWideChar. */ int to_wchar(const char *path, wchar_t *wbuf, size_t wbuf_len); #endif struct ctl_msg { mg_event_handler_t callback; char message[MG_CTL_MSG_MESSAGE_SIZE]; }; #if MG_ENABLE_MQTT struct mg_mqtt_message; MG_INTERNAL int parse_mqtt(struct mbuf *io, struct mg_mqtt_message *mm); #endif /* Forward declarations for testing. 
*/ extern void *(*test_malloc)(size_t size); extern void *(*test_calloc)(size_t count, size_t size); #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif #if MG_ENABLE_HTTP struct mg_serve_http_opts; /* * Reassemble the content of the buffer (buf, blen) which should be * in the HTTP chunked encoding, by collapsing data chunks to the * beginning of the buffer. * * If chunks get reassembled, modify hm->body to point to the reassembled * body and fire MG_EV_HTTP_CHUNK event. If handler sets MG_F_DELETE_CHUNK * in nc->flags, delete reassembled body from the mbuf. * * Return reassembled body size. */ MG_INTERNAL size_t mg_handle_chunked(struct mg_connection *nc, struct http_message *hm, char *buf, size_t blen); MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema, const char *schema_tls, int *use_ssl, char **user, char **pass, char **addr, int *port_i, const char **path); #if MG_ENABLE_FILESYSTEM MG_INTERNAL int mg_uri_to_local_path(struct http_message *hm, const struct mg_serve_http_opts *opts, char **local_path, struct mg_str *remainder); MG_INTERNAL time_t mg_parse_date_string(const char *datetime); MG_INTERNAL int mg_is_not_modified(struct http_message *hm, cs_stat_t *st); #endif #if MG_ENABLE_HTTP_CGI MG_INTERNAL void mg_handle_cgi(struct mg_connection *nc, const char *prog, const struct mg_str *path_info, const struct http_message *hm, const struct mg_serve_http_opts *opts); struct mg_http_proto_data_cgi; MG_INTERNAL void mg_http_free_proto_data_cgi(struct mg_http_proto_data_cgi *d); #endif #if MG_ENABLE_HTTP_SSI MG_INTERNAL void mg_handle_ssi_request(struct mg_connection *nc, struct http_message *hm, const char *path, const struct mg_serve_http_opts *opts); #endif #if MG_ENABLE_HTTP_WEBDAV MG_INTERNAL int mg_is_dav_request(const struct mg_str *s); MG_INTERNAL void mg_handle_propfind(struct mg_connection *nc, const char *path, cs_stat_t *stp, struct http_message *hm, struct mg_serve_http_opts *opts); MG_INTERNAL void 
mg_handle_lock(struct mg_connection *nc, const char *path); MG_INTERNAL void mg_handle_mkcol(struct mg_connection *nc, const char *path, struct http_message *hm); MG_INTERNAL void mg_handle_move(struct mg_connection *c, const struct mg_serve_http_opts *opts, const char *path, struct http_message *hm); MG_INTERNAL void mg_handle_delete(struct mg_connection *nc, const struct mg_serve_http_opts *opts, const char *path); MG_INTERNAL void mg_handle_put(struct mg_connection *nc, const char *path, struct http_message *hm); #endif #if MG_ENABLE_HTTP_WEBSOCKET MG_INTERNAL void mg_ws_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)); MG_INTERNAL void mg_ws_handshake(struct mg_connection *nc, const struct mg_str *key); #endif #endif /* MG_ENABLE_HTTP */ MG_INTERNAL int mg_get_errno(void); MG_INTERNAL void mg_close_conn(struct mg_connection *conn); MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema, const char *schema_tls, int *use_ssl, char **user, char **pass, char **addr, int *port_i, const char **path); #if MG_ENABLE_SNTP MG_INTERNAL int mg_sntp_parse_reply(const char *buf, int len, struct mg_sntp_message *msg); #endif #endif /* CS_MONGOOSE_SRC_INTERNAL_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/mg_mem.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_MG_MEM_H_ #define CS_COMMON_MG_MEM_H_ #ifndef MG_MALLOC #define MG_MALLOC malloc #endif #ifndef MG_CALLOC #define MG_CALLOC calloc #endif #ifndef MG_REALLOC #define MG_REALLOC realloc #endif #ifndef MG_FREE #define MG_FREE free #endif #endif /* CS_COMMON_MG_MEM_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/cs_dbg.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_CS_DBG_H_ #define CS_COMMON_CS_DBG_H_ /* Amalgamated: #include "common/platform.h" */ #if CS_ENABLE_STDIO #include <stdio.h> #endif #ifndef CS_ENABLE_DEBUG #define CS_ENABLE_DEBUG 0 #endif 
#ifndef CS_LOG_ENABLE_TS_DIFF #define CS_LOG_ENABLE_TS_DIFF 0 #endif #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ enum cs_log_level { LL_NONE = -1, LL_ERROR = 0, LL_WARN = 1, LL_INFO = 2, LL_DEBUG = 3, LL_VERBOSE_DEBUG = 4, _LL_MIN = -2, _LL_MAX = 5, }; void cs_log_set_level(enum cs_log_level level); #if CS_ENABLE_STDIO void cs_log_set_file(FILE *file); extern enum cs_log_level cs_log_level; void cs_log_print_prefix(const char *func); void cs_log_printf(const char *fmt, ...); #define LOG(l, x) \ do { \ if (cs_log_level >= l) { \ cs_log_print_prefix(__func__); \ cs_log_printf x; \ } \ } while (0) #ifndef CS_NDEBUG #define DBG(x) \ do { \ if (cs_log_level >= LL_VERBOSE_DEBUG) { \ cs_log_print_prefix(__func__); \ cs_log_printf x; \ } \ } while (0) #else /* NDEBUG */ #define DBG(x) #endif #else /* CS_ENABLE_STDIO */ #define LOG(l, x) #define DBG(x) #endif #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* CS_COMMON_CS_DBG_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/cs_dbg.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ /* Amalgamated: #include "common/cs_dbg.h" */ #include <stdarg.h> #include <stdio.h> #include <string.h> /* Amalgamated: #include "common/cs_time.h" */ enum cs_log_level cs_log_level WEAK = #if CS_ENABLE_DEBUG LL_VERBOSE_DEBUG; #else LL_ERROR; #endif #if CS_ENABLE_STDIO FILE *cs_log_file WEAK = NULL; #if CS_LOG_ENABLE_TS_DIFF double cs_log_ts WEAK; #endif void cs_log_print_prefix(const char *func) WEAK; void cs_log_print_prefix(const char *func) { char prefix[21]; strncpy(prefix, func, 20); prefix[20] = '\0'; if (cs_log_file == NULL) cs_log_file = stderr; fprintf(cs_log_file, "%-20s ", prefix); #if CS_LOG_ENABLE_TS_DIFF { double now = cs_time(); fprintf(cs_log_file, "%7u ", (unsigned int) ((now - cs_log_ts) * 1000000)); cs_log_ts = now; } #endif } void cs_log_printf(const char *fmt, ...) WEAK; void cs_log_printf(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(cs_log_file, fmt, ap); va_end(ap); fputc('\n', cs_log_file); fflush(cs_log_file); } void cs_log_set_file(FILE *file) WEAK; void cs_log_set_file(FILE *file) { cs_log_file = file; } #endif /* CS_ENABLE_STDIO */ void cs_log_set_level(enum cs_log_level level) WEAK; void cs_log_set_level(enum cs_log_level level) { cs_log_level = level; #if CS_LOG_ENABLE_TS_DIFF && CS_ENABLE_STDIO cs_log_ts = cs_time(); #endif } #ifdef MG_MODULE_LINES #line 1 "common/base64.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #ifndef EXCLUDE_COMMON /* Amalgamated: #include "common/base64.h" */ #include <string.h> /* Amalgamated: #include "common/cs_dbg.h" */ /* ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ */ #define NUM_UPPERCASES ('Z' - 'A' + 1) #define NUM_LETTERS (NUM_UPPERCASES * 2) #define NUM_DIGITS ('9' - '0' + 1) /* * Emit a base64 code char. * * Doesn't use memory, thus it's safe to use to safely dump memory in crashdumps */ static void cs_base64_emit_code(struct cs_base64_ctx *ctx, int v) { if (v < NUM_UPPERCASES) { ctx->b64_putc(v + 'A', ctx->user_data); } else if (v < (NUM_LETTERS)) { ctx->b64_putc(v - NUM_UPPERCASES + 'a', ctx->user_data); } else if (v < (NUM_LETTERS + NUM_DIGITS)) { ctx->b64_putc(v - NUM_LETTERS + '0', ctx->user_data); } else { ctx->b64_putc(v - NUM_LETTERS - NUM_DIGITS == 0 ? 
'+' : '/', ctx->user_data); } } static void cs_base64_emit_chunk(struct cs_base64_ctx *ctx) { int a, b, c; a = ctx->chunk[0]; b = ctx->chunk[1]; c = ctx->chunk[2]; cs_base64_emit_code(ctx, a >> 2); cs_base64_emit_code(ctx, ((a & 3) << 4) | (b >> 4)); if (ctx->chunk_size > 1) { cs_base64_emit_code(ctx, (b & 15) << 2 | (c >> 6)); } if (ctx->chunk_size > 2) { cs_base64_emit_code(ctx, c & 63); } } void cs_base64_init(struct cs_base64_ctx *ctx, cs_base64_putc_t b64_putc, void *user_data) { ctx->chunk_size = 0; ctx->b64_putc = b64_putc; ctx->user_data = user_data; } void cs_base64_update(struct cs_base64_ctx *ctx, const char *str, size_t len) { const unsigned char *src = (const unsigned char *) str; size_t i; for (i = 0; i < len; i++) { ctx->chunk[ctx->chunk_size++] = src[i]; if (ctx->chunk_size == 3) { cs_base64_emit_chunk(ctx); ctx->chunk_size = 0; } } } void cs_base64_finish(struct cs_base64_ctx *ctx) { if (ctx->chunk_size > 0) { int i; memset(&ctx->chunk[ctx->chunk_size], 0, 3 - ctx->chunk_size); cs_base64_emit_chunk(ctx); for (i = 0; i < (3 - ctx->chunk_size); i++) { ctx->b64_putc('=', ctx->user_data); } } } #define BASE64_ENCODE_BODY \ static const char *b64 = \ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; \ int i, j, a, b, c; \ \ for (i = j = 0; i < src_len; i += 3) { \ a = src[i]; \ b = i + 1 >= src_len ? 0 : src[i + 1]; \ c = i + 2 >= src_len ? 
0 : src[i + 2]; \ \ BASE64_OUT(b64[a >> 2]); \ BASE64_OUT(b64[((a & 3) << 4) | (b >> 4)]); \ if (i + 1 < src_len) { \ BASE64_OUT(b64[(b & 15) << 2 | (c >> 6)]); \ } \ if (i + 2 < src_len) { \ BASE64_OUT(b64[c & 63]); \ } \ } \ \ while (j % 4 != 0) { \ BASE64_OUT('='); \ } \ BASE64_FLUSH() #define BASE64_OUT(ch) \ do { \ dst[j++] = (ch); \ } while (0) #define BASE64_FLUSH() \ do { \ dst[j++] = '\0'; \ } while (0) void cs_base64_encode(const unsigned char *src, int src_len, char *dst) { BASE64_ENCODE_BODY; } #undef BASE64_OUT #undef BASE64_FLUSH #if CS_ENABLE_STDIO #define BASE64_OUT(ch) \ do { \ fprintf(f, "%c", (ch)); \ j++; \ } while (0) #define BASE64_FLUSH() void cs_fprint_base64(FILE *f, const unsigned char *src, int src_len) { BASE64_ENCODE_BODY; } #undef BASE64_OUT #undef BASE64_FLUSH #endif /* CS_ENABLE_STDIO */ /* Convert one byte of encoded base64 input stream to 6-bit chunk */ static unsigned char from_b64(unsigned char ch) { /* Inverse lookup map */ static const unsigned char tab[128] = { 255, 255, 255, 255, 255, 255, 255, 255, /* 0 */ 255, 255, 255, 255, 255, 255, 255, 255, /* 8 */ 255, 255, 255, 255, 255, 255, 255, 255, /* 16 */ 255, 255, 255, 255, 255, 255, 255, 255, /* 24 */ 255, 255, 255, 255, 255, 255, 255, 255, /* 32 */ 255, 255, 255, 62, 255, 255, 255, 63, /* 40 */ 52, 53, 54, 55, 56, 57, 58, 59, /* 48 */ 60, 61, 255, 255, 255, 200, 255, 255, /* 56 '=' is 200, on index 61 */ 255, 0, 1, 2, 3, 4, 5, 6, /* 64 */ 7, 8, 9, 10, 11, 12, 13, 14, /* 72 */ 15, 16, 17, 18, 19, 20, 21, 22, /* 80 */ 23, 24, 25, 255, 255, 255, 255, 255, /* 88 */ 255, 26, 27, 28, 29, 30, 31, 32, /* 96 */ 33, 34, 35, 36, 37, 38, 39, 40, /* 104 */ 41, 42, 43, 44, 45, 46, 47, 48, /* 112 */ 49, 50, 51, 255, 255, 255, 255, 255, /* 120 */ }; return tab[ch & 127]; } int cs_base64_decode(const unsigned char *s, int len, char *dst, int *dec_len) { unsigned char a, b, c, d; int orig_len = len; char *orig_dst = dst; while (len >= 4 && (a = from_b64(s[0])) != 255 && (b = from_b64(s[1])) != 
255 && (c = from_b64(s[2])) != 255 && (d = from_b64(s[3])) != 255) { s += 4; len -= 4; if (a == 200 || b == 200) break; /* '=' can't be there */ *dst++ = a << 2 | b >> 4; if (c == 200) break; *dst++ = b << 4 | c >> 2; if (d == 200) break; *dst++ = c << 6 | d; } *dst = 0; if (dec_len != NULL) *dec_len = (dst - orig_dst); return orig_len - len; } #endif /* EXCLUDE_COMMON */ #ifdef MG_MODULE_LINES #line 1 "common/cs_dirent.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_CS_DIRENT_H_ #define CS_COMMON_CS_DIRENT_H_ #include <limits.h> /* Amalgamated: #include "common/platform.h" */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #ifdef CS_DEFINE_DIRENT typedef struct { int dummy; } DIR; struct dirent { int d_ino; #ifdef _WIN32 char d_name[MAX_PATH]; #else /* TODO(rojer): Use PATH_MAX but make sure it's sane on every platform */ char d_name[256]; #endif }; DIR *opendir(const char *dir_name); int closedir(DIR *dir); struct dirent *readdir(DIR *dir); #endif /* CS_DEFINE_DIRENT */ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* CS_COMMON_CS_DIRENT_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/cs_dirent.c" #endif /* * Copyright (c) 2015 Cesanta Software Limited * All rights reserved */ #ifndef EXCLUDE_COMMON /* Amalgamated: #include "common/mg_mem.h" */ /* Amalgamated: #include "common/cs_dirent.h" */ /* * This file contains POSIX opendir/closedir/readdir API implementation * for systems which do not natively support it (e.g. Windows). 
*/ #ifdef _WIN32 struct win32_dir { DIR d; HANDLE handle; WIN32_FIND_DATAW info; struct dirent result; }; DIR *opendir(const char *name) { struct win32_dir *dir = NULL; wchar_t wpath[MAX_PATH]; DWORD attrs; if (name == NULL) { SetLastError(ERROR_BAD_ARGUMENTS); } else if ((dir = (struct win32_dir *) MG_MALLOC(sizeof(*dir))) == NULL) { SetLastError(ERROR_NOT_ENOUGH_MEMORY); } else { to_wchar(name, wpath, ARRAY_SIZE(wpath)); attrs = GetFileAttributesW(wpath); if (attrs != 0xFFFFFFFF && (attrs & FILE_ATTRIBUTE_DIRECTORY)) { (void) wcscat(wpath, L"\\*"); dir->handle = FindFirstFileW(wpath, &dir->info); dir->result.d_name[0] = '\0'; } else { MG_FREE(dir); dir = NULL; } } return (DIR *) dir; } int closedir(DIR *d) { struct win32_dir *dir = (struct win32_dir *) d; int result = 0; if (dir != NULL) { if (dir->handle != INVALID_HANDLE_VALUE) result = FindClose(dir->handle) ? 0 : -1; MG_FREE(dir); } else { result = -1; SetLastError(ERROR_BAD_ARGUMENTS); } return result; } struct dirent *readdir(DIR *d) { struct win32_dir *dir = (struct win32_dir *) d; struct dirent *result = NULL; if (dir) { memset(&dir->result, 0, sizeof(dir->result)); if (dir->handle != INVALID_HANDLE_VALUE) { result = &dir->result; (void) WideCharToMultiByte(CP_UTF8, 0, dir->info.cFileName, -1, result->d_name, sizeof(result->d_name), NULL, NULL); if (!FindNextFileW(dir->handle, &dir->info)) { (void) FindClose(dir->handle); dir->handle = INVALID_HANDLE_VALUE; } } else { SetLastError(ERROR_FILE_NOT_FOUND); } } else { SetLastError(ERROR_BAD_ARGUMENTS); } return result; } #endif #endif /* EXCLUDE_COMMON */ /* ISO C requires a translation unit to contain at least one declaration */ typedef int cs_dirent_dummy; #ifdef MG_MODULE_LINES #line 1 "common/cs_time.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ /* Amalgamated: #include "common/cs_time.h" */ #ifndef _WIN32 #include <stddef.h> /* * There is no sys/time.h on ARMCC. 
*/ #if !(defined(__ARMCC_VERSION) || defined(__ICCARM__)) && \ !defined(__TI_COMPILER_VERSION__) && \ (!defined(CS_PLATFORM) || CS_PLATFORM != CS_P_NXP_LPC) #include <sys/time.h> #endif #else #include <windows.h> #endif double cs_time(void) WEAK; double cs_time(void) { double now; #ifndef _WIN32 struct timeval tv; if (gettimeofday(&tv, NULL /* tz */) != 0) return 0; now = (double) tv.tv_sec + (((double) tv.tv_usec) / 1000000.0); #else SYSTEMTIME sysnow; FILETIME ftime; GetLocalTime(&sysnow); SystemTimeToFileTime(&sysnow, &ftime); /* * 1. VC 6.0 doesn't support conversion uint64 -> double, so, using int64 * This should not cause a problems in this (21th) century * 2. Windows FILETIME is a number of 100-nanosecond intervals since January * 1, 1601 while time_t is a number of _seconds_ since January 1, 1970 UTC, * thus, we need to convert to seconds and adjust amount (subtract 11644473600 * seconds) */ now = (double) (((int64_t) ftime.dwLowDateTime + ((int64_t) ftime.dwHighDateTime << 32)) / 10000000.0) - 11644473600; #endif /* _WIN32 */ return now; } #ifdef MG_MODULE_LINES #line 1 "common/cs_endian.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_CS_ENDIAN_H_ #define CS_COMMON_CS_ENDIAN_H_ /* * clang with std=-c99 uses __LITTLE_ENDIAN, by default * while for ex, RTOS gcc - LITTLE_ENDIAN, by default * it depends on __USE_BSD, but let's have everything */ #if !defined(BYTE_ORDER) && defined(__BYTE_ORDER) #define BYTE_ORDER __BYTE_ORDER #ifndef LITTLE_ENDIAN #define LITTLE_ENDIAN __LITTLE_ENDIAN #endif /* LITTLE_ENDIAN */ #ifndef BIG_ENDIAN #define BIG_ENDIAN __LITTLE_ENDIAN #endif /* BIG_ENDIAN */ #endif /* BYTE_ORDER */ #endif /* CS_COMMON_CS_ENDIAN_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/md5.c" #endif /* * This code implements the MD5 message-digest algorithm. * The algorithm is due to Ron Rivest. This code was * written by Colin Plumb in 1993, no copyright is claimed. 
* This code is in the public domain; do with it what you wish. * * Equivalent code is available from RSA Data Security, Inc. * This code has been tested against that, and is equivalent, * except that you don't need to include two pages of legalese * with every copy. * * To compute the message digest of a chunk of bytes, declare an * MD5Context structure, pass it to MD5Init, call MD5Update as * needed on buffers full of bytes, and then call MD5Final, which * will fill a supplied 16-byte array with the digest. */ /* Amalgamated: #include "common/md5.h" */ /* Amalgamated: #include "common/str_util.h" */ #if !defined(EXCLUDE_COMMON) #if !CS_DISABLE_MD5 /* Amalgamated: #include "common/cs_endian.h" */ static void byteReverse(unsigned char *buf, unsigned longs) { /* Forrest: MD5 expect LITTLE_ENDIAN, swap if BIG_ENDIAN */ #if BYTE_ORDER == BIG_ENDIAN do { uint32_t t = (uint32_t)((unsigned) buf[3] << 8 | buf[2]) << 16 | ((unsigned) buf[1] << 8 | buf[0]); *(uint32_t *) buf = t; buf += 4; } while (--longs); #else (void) buf; (void) longs; #endif } #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) #define F4(x, y, z) (y ^ (x | ~z)) #define MD5STEP(f, w, x, y, z, data, s) \ (w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x) /* * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious * initialization constants. 
*/ void MD5_Init(MD5_CTX *ctx) { ctx->buf[0] = 0x67452301; ctx->buf[1] = 0xefcdab89; ctx->buf[2] = 0x98badcfe; ctx->buf[3] = 0x10325476; ctx->bits[0] = 0; ctx->bits[1] = 0; } static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) { register uint32_t a, b, c, d; a = buf[0]; b = buf[1]; c = buf[2]; d = buf[3]; MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); MD5STEP(F3, c, 
d, a, b, in[11] + 0x6d9d6122, 16); MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); buf[0] += a; buf[1] += b; buf[2] += c; buf[3] += d; } void MD5_Update(MD5_CTX *ctx, const unsigned char *buf, size_t len) { uint32_t t; t = ctx->bits[0]; if ((ctx->bits[0] = t + ((uint32_t) len << 3)) < t) ctx->bits[1]++; ctx->bits[1] += (uint32_t) len >> 29; t = (t >> 3) & 0x3f; if (t) { unsigned char *p = (unsigned char *) ctx->in + t; t = 64 - t; if (len < t) { memcpy(p, buf, len); return; } memcpy(p, buf, t); byteReverse(ctx->in, 16); MD5Transform(ctx->buf, (uint32_t *) ctx->in); buf += t; len -= t; } while (len >= 64) { 
memcpy(ctx->in, buf, 64); byteReverse(ctx->in, 16); MD5Transform(ctx->buf, (uint32_t *) ctx->in); buf += 64; len -= 64; } memcpy(ctx->in, buf, len); } void MD5_Final(unsigned char digest[16], MD5_CTX *ctx) { unsigned count; unsigned char *p; uint32_t *a; count = (ctx->bits[0] >> 3) & 0x3F; p = ctx->in + count; *p++ = 0x80; count = 64 - 1 - count; if (count < 8) { memset(p, 0, count); byteReverse(ctx->in, 16); MD5Transform(ctx->buf, (uint32_t *) ctx->in); memset(ctx->in, 0, 56); } else { memset(p, 0, count - 8); } byteReverse(ctx->in, 14); a = (uint32_t *) ctx->in; a[14] = ctx->bits[0]; a[15] = ctx->bits[1]; MD5Transform(ctx->buf, (uint32_t *) ctx->in); byteReverse((unsigned char *) ctx->buf, 4); memcpy(digest, ctx->buf, 16); memset((char *) ctx, 0, sizeof(*ctx)); } char *cs_md5(char buf[33], ...) { unsigned char hash[16]; const unsigned char *p; va_list ap; MD5_CTX ctx; MD5_Init(&ctx); va_start(ap, buf); while ((p = va_arg(ap, const unsigned char *) ) != NULL) { size_t len = va_arg(ap, size_t); MD5_Update(&ctx, p, len); } va_end(ap); MD5_Final(hash, &ctx); cs_to_hex(buf, hash, sizeof(hash)); return buf; } #endif /* CS_DISABLE_MD5 */ #endif /* EXCLUDE_COMMON */ #ifdef MG_MODULE_LINES #line 1 "common/mbuf.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #ifndef EXCLUDE_COMMON #include <assert.h> #include <string.h> /* Amalgamated: #include "common/mbuf.h" */ #ifndef MBUF_REALLOC #define MBUF_REALLOC realloc #endif #ifndef MBUF_FREE #define MBUF_FREE free #endif void mbuf_init(struct mbuf *mbuf, size_t initial_size) WEAK; void mbuf_init(struct mbuf *mbuf, size_t initial_size) { mbuf->len = mbuf->size = 0; mbuf->buf = NULL; mbuf_resize(mbuf, initial_size); } void mbuf_free(struct mbuf *mbuf) WEAK; void mbuf_free(struct mbuf *mbuf) { if (mbuf->buf != NULL) { MBUF_FREE(mbuf->buf); mbuf_init(mbuf, 0); } } void mbuf_resize(struct mbuf *a, size_t new_size) WEAK; void mbuf_resize(struct mbuf *a, size_t new_size) { if (new_size > a->size || 
(new_size < a->size && new_size >= a->len)) { char *buf = (char *) MBUF_REALLOC(a->buf, new_size); /* * In case realloc fails, there's not much we can do, except keep things as * they are. Note that NULL is a valid return value from realloc when * size == 0, but that is covered too. */ if (buf == NULL && new_size != 0) return; a->buf = buf; a->size = new_size; } } void mbuf_trim(struct mbuf *mbuf) WEAK; void mbuf_trim(struct mbuf *mbuf) { mbuf_resize(mbuf, mbuf->len); } size_t mbuf_insert(struct mbuf *a, size_t off, const void *buf, size_t) WEAK; size_t mbuf_insert(struct mbuf *a, size_t off, const void *buf, size_t len) { char *p = NULL; assert(a != NULL); assert(a->len <= a->size); assert(off <= a->len); /* check overflow */ if (~(size_t) 0 - (size_t) a->buf < len) return 0; if (a->len + len <= a->size) { memmove(a->buf + off + len, a->buf + off, a->len - off); if (buf != NULL) { memcpy(a->buf + off, buf, len); } a->len += len; } else { size_t new_size = (size_t)((a->len + len) * MBUF_SIZE_MULTIPLIER); if ((p = (char *) MBUF_REALLOC(a->buf, new_size)) != NULL) { a->buf = p; memmove(a->buf + off + len, a->buf + off, a->len - off); if (buf != NULL) memcpy(a->buf + off, buf, len); a->len += len; a->size = new_size; } else { len = 0; } } return len; } size_t mbuf_append(struct mbuf *a, const void *buf, size_t len) WEAK; size_t mbuf_append(struct mbuf *a, const void *buf, size_t len) { return mbuf_insert(a, a->len, buf, len); } void mbuf_remove(struct mbuf *mb, size_t n) WEAK; void mbuf_remove(struct mbuf *mb, size_t n) { if (n > 0 && n <= mb->len) { memmove(mb->buf, mb->buf + n, mb->len - n); mb->len -= n; } } #endif /* EXCLUDE_COMMON */ #ifdef MG_MODULE_LINES #line 1 "common/mg_str.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ /* Amalgamated: #include "common/mg_mem.h" */ /* Amalgamated: #include "common/mg_str.h" */ #include <stdlib.h> #include <string.h> int mg_ncasecmp(const char *s1, const char *s2, size_t len) WEAK; 
struct mg_str mg_mk_str(const char *s) WEAK; struct mg_str mg_mk_str(const char *s) { struct mg_str ret = {s, 0}; if (s != NULL) ret.len = strlen(s); return ret; } struct mg_str mg_mk_str_n(const char *s, size_t len) WEAK; struct mg_str mg_mk_str_n(const char *s, size_t len) { struct mg_str ret = {s, len}; return ret; } int mg_vcmp(const struct mg_str *str1, const char *str2) WEAK; int mg_vcmp(const struct mg_str *str1, const char *str2) { size_t n2 = strlen(str2), n1 = str1->len; int r = strncmp(str1->p, str2, (n1 < n2) ? n1 : n2); if (r == 0) { return n1 - n2; } return r; } int mg_vcasecmp(const struct mg_str *str1, const char *str2) WEAK; int mg_vcasecmp(const struct mg_str *str1, const char *str2) { size_t n2 = strlen(str2), n1 = str1->len; int r = mg_ncasecmp(str1->p, str2, (n1 < n2) ? n1 : n2); if (r == 0) { return n1 - n2; } return r; } struct mg_str mg_strdup(const struct mg_str s) WEAK; struct mg_str mg_strdup(const struct mg_str s) { struct mg_str r = {NULL, 0}; if (s.len > 0 && s.p != NULL) { r.p = (char *) MG_MALLOC(s.len); if (r.p != NULL) { memcpy((char *) r.p, s.p, s.len); r.len = s.len; } } return r; } int mg_strcmp(const struct mg_str str1, const struct mg_str str2) WEAK; int mg_strcmp(const struct mg_str str1, const struct mg_str str2) { size_t i = 0; while (i < str1.len && i < str2.len) { if (str1.p[i] < str2.p[i]) return -1; if (str1.p[i] > str2.p[i]) return 1; i++; } if (i < str1.len) return 1; if (i < str2.len) return -1; return 0; } int mg_strncmp(const struct mg_str, const struct mg_str, size_t n) WEAK; int mg_strncmp(const struct mg_str str1, const struct mg_str str2, size_t n) { struct mg_str s1 = str1; struct mg_str s2 = str2; if (s1.len > n) { s1.len = n; } if (s2.len > n) { s2.len = n; } return mg_strcmp(s1, s2); } #ifdef MG_MODULE_LINES #line 1 "common/sha1.c" #endif /* Copyright(c) By Steve Reid <steve@edmweb.com> */ /* 100% Public Domain */ /* Amalgamated: #include "common/sha1.h" */ #if !CS_DISABLE_SHA1 && !defined(EXCLUDE_COMMON) 
/* Amalgamated: #include "common/cs_endian.h" */ #define SHA1HANDSOFF #if defined(__sun) /* Amalgamated: #include "common/solarisfixes.h" */ #endif union char64long16 { unsigned char c[64]; uint32_t l[16]; }; #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) static uint32_t blk0(union char64long16 *block, int i) { /* Forrest: SHA expect BIG_ENDIAN, swap if LITTLE_ENDIAN */ #if BYTE_ORDER == LITTLE_ENDIAN block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) | (rol(block->l[i], 8) & 0x00FF00FF); #endif return block->l[i]; } /* Avoid redefine warning (ARM /usr/include/sys/ucontext.h define R0~R4) */ #undef blk #undef R0 #undef R1 #undef R2 #undef R3 #undef R4 #define blk(i) \ (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ block->l[(i + 8) & 15] ^ \ block->l[(i + 2) & 15] ^ block->l[i & 15], \ 1)) #define R0(v, w, x, y, z, i) \ z += ((w & (x ^ y)) ^ y) + blk0(block, i) + 0x5A827999 + rol(v, 5); \ w = rol(w, 30); #define R1(v, w, x, y, z, i) \ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \ w = rol(w, 30); #define R2(v, w, x, y, z, i) \ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \ w = rol(w, 30); #define R3(v, w, x, y, z, i) \ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \ w = rol(w, 30); #define R4(v, w, x, y, z, i) \ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \ w = rol(w, 30); void cs_sha1_transform(uint32_t state[5], const unsigned char buffer[64]) { uint32_t a, b, c, d, e; union char64long16 block[1]; memcpy(block, buffer, 64); a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1); R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3); R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5); R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7); R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9); R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11); R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13); R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15); R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 
17); R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19); R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21); R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23); R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25); R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27); R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29); R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31); R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33); R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35); R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37); R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39); R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41); R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43); R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45); R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47); R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49); R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51); R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53); R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55); R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57); R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59); R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61); R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63); R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65); R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67); R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69); R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71); R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73); R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75); R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77); R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79); state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; /* Erase working structures. The order of operations is important, * used to ensure that compiler doesn't optimize those out. 
*/ memset(block, 0, sizeof(block)); a = b = c = d = e = 0; (void) a; (void) b; (void) c; (void) d; (void) e; } void cs_sha1_init(cs_sha1_ctx *context) { context->state[0] = 0x67452301; context->state[1] = 0xEFCDAB89; context->state[2] = 0x98BADCFE; context->state[3] = 0x10325476; context->state[4] = 0xC3D2E1F0; context->count[0] = context->count[1] = 0; } void cs_sha1_update(cs_sha1_ctx *context, const unsigned char *data, uint32_t len) { uint32_t i, j; j = context->count[0]; if ((context->count[0] += len << 3) < j) context->count[1]++; context->count[1] += (len >> 29); j = (j >> 3) & 63; if ((j + len) > 63) { memcpy(&context->buffer[j], data, (i = 64 - j)); cs_sha1_transform(context->state, context->buffer); for (; i + 63 < len; i += 64) { cs_sha1_transform(context->state, &data[i]); } j = 0; } else i = 0; memcpy(&context->buffer[j], &data[i], len - i); } void cs_sha1_final(unsigned char digest[20], cs_sha1_ctx *context) { unsigned i; unsigned char finalcount[8], c; for (i = 0; i < 8; i++) { finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 
0 : 1)] >> ((3 - (i & 3)) * 8)) & 255); } c = 0200; cs_sha1_update(context, &c, 1); while ((context->count[0] & 504) != 448) { c = 0000; cs_sha1_update(context, &c, 1); } cs_sha1_update(context, finalcount, 8); for (i = 0; i < 20; i++) { digest[i] = (unsigned char) ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255); } memset(context, '\0', sizeof(*context)); memset(&finalcount, '\0', sizeof(finalcount)); } void cs_hmac_sha1(const unsigned char *key, size_t keylen, const unsigned char *data, size_t datalen, unsigned char out[20]) { cs_sha1_ctx ctx; unsigned char buf1[64], buf2[64], tmp_key[20], i; if (keylen > sizeof(buf1)) { cs_sha1_init(&ctx); cs_sha1_update(&ctx, key, keylen); cs_sha1_final(tmp_key, &ctx); key = tmp_key; keylen = sizeof(tmp_key); } memset(buf1, 0, sizeof(buf1)); memset(buf2, 0, sizeof(buf2)); memcpy(buf1, key, keylen); memcpy(buf2, key, keylen); for (i = 0; i < sizeof(buf1); i++) { buf1[i] ^= 0x36; buf2[i] ^= 0x5c; } cs_sha1_init(&ctx); cs_sha1_update(&ctx, buf1, sizeof(buf1)); cs_sha1_update(&ctx, data, datalen); cs_sha1_final(out, &ctx); cs_sha1_init(&ctx); cs_sha1_update(&ctx, buf2, sizeof(buf2)); cs_sha1_update(&ctx, out, 20); cs_sha1_final(out, &ctx); } #endif /* EXCLUDE_COMMON */ #ifdef MG_MODULE_LINES #line 1 "common/str_util.c" #endif /* * Copyright (c) 2015 Cesanta Software Limited * All rights reserved */ #ifndef EXCLUDE_COMMON /* Amalgamated: #include "common/mg_mem.h" */ /* Amalgamated: #include "common/platform.h" */ /* Amalgamated: #include "common/str_util.h" */ #ifndef C_DISABLE_BUILTIN_SNPRINTF #define C_DISABLE_BUILTIN_SNPRINTF 0 #endif /* Amalgamated: #include "common/mg_mem.h" */ size_t c_strnlen(const char *s, size_t maxlen) WEAK; size_t c_strnlen(const char *s, size_t maxlen) { size_t l = 0; for (; l < maxlen && s[l] != '\0'; l++) { } return l; } #define C_SNPRINTF_APPEND_CHAR(ch) \ do { \ if (i < (int) buf_size) buf[i] = ch; \ i++; \ } while (0) #define C_SNPRINTF_FLAG_ZERO 1 #if C_DISABLE_BUILTIN_SNPRINTF int 
c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) WEAK; int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) { return vsnprintf(buf, buf_size, fmt, ap); } #else static int c_itoa(char *buf, size_t buf_size, int64_t num, int base, int flags, int field_width) { char tmp[40]; int i = 0, k = 0, neg = 0; if (num < 0) { neg++; num = -num; } /* Print into temporary buffer - in reverse order */ do { int rem = num % base; if (rem < 10) { tmp[k++] = '0' + rem; } else { tmp[k++] = 'a' + (rem - 10); } num /= base; } while (num > 0); /* Zero padding */ if (flags && C_SNPRINTF_FLAG_ZERO) { while (k < field_width && k < (int) sizeof(tmp) - 1) { tmp[k++] = '0'; } } /* And sign */ if (neg) { tmp[k++] = '-'; } /* Now output */ while (--k >= 0) { C_SNPRINTF_APPEND_CHAR(tmp[k]); } return i; } int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) WEAK; int c_vsnprintf(char *buf, size_t buf_size, const char *fmt, va_list ap) { int ch, i = 0, len_mod, flags, precision, field_width; while ((ch = *fmt++) != '\0') { if (ch != '%') { C_SNPRINTF_APPEND_CHAR(ch); } else { /* * Conversion specification: * zero or more flags (one of: # 0 - <space> + ') * an optional minimum field width (digits) * an optional precision (. followed by digits, or *) * an optional length modifier (one of: hh h l ll L q j z t) * conversion specifier (one of: d i o u x X e E f F g G a A c s p n) */ flags = field_width = precision = len_mod = 0; /* Flags. only zero-pad flag is supported. 
*/ if (*fmt == '0') { flags |= C_SNPRINTF_FLAG_ZERO; } /* Field width */ while (*fmt >= '0' && *fmt <= '9') { field_width *= 10; field_width += *fmt++ - '0'; } /* Dynamic field width */ if (*fmt == '*') { field_width = va_arg(ap, int); fmt++; } /* Precision */ if (*fmt == '.') { fmt++; if (*fmt == '*') { precision = va_arg(ap, int); fmt++; } else { while (*fmt >= '0' && *fmt <= '9') { precision *= 10; precision += *fmt++ - '0'; } } } /* Length modifier */ switch (*fmt) { case 'h': case 'l': case 'L': case 'I': case 'q': case 'j': case 'z': case 't': len_mod = *fmt++; if (*fmt == 'h') { len_mod = 'H'; fmt++; } if (*fmt == 'l') { len_mod = 'q'; fmt++; } break; } ch = *fmt++; if (ch == 's') { const char *s = va_arg(ap, const char *); /* Always fetch parameter */ int j; int pad = field_width - (precision >= 0 ? c_strnlen(s, precision) : 0); for (j = 0; j < pad; j++) { C_SNPRINTF_APPEND_CHAR(' '); } /* `s` may be NULL in case of %.*s */ if (s != NULL) { /* Ignore negative and 0 precisions */ for (j = 0; (precision <= 0 || j < precision) && s[j] != '\0'; j++) { C_SNPRINTF_APPEND_CHAR(s[j]); } } } else if (ch == 'c') { ch = va_arg(ap, int); /* Always fetch parameter */ C_SNPRINTF_APPEND_CHAR(ch); } else if (ch == 'd' && len_mod == 0) { i += c_itoa(buf + i, buf_size - i, va_arg(ap, int), 10, flags, field_width); } else if (ch == 'd' && len_mod == 'l') { i += c_itoa(buf + i, buf_size - i, va_arg(ap, long), 10, flags, field_width); #ifdef SSIZE_MAX } else if (ch == 'd' && len_mod == 'z') { i += c_itoa(buf + i, buf_size - i, va_arg(ap, ssize_t), 10, flags, field_width); #endif } else if (ch == 'd' && len_mod == 'q') { i += c_itoa(buf + i, buf_size - i, va_arg(ap, int64_t), 10, flags, field_width); } else if ((ch == 'x' || ch == 'u') && len_mod == 0) { i += c_itoa(buf + i, buf_size - i, va_arg(ap, unsigned), ch == 'x' ? 
16 : 10, flags, field_width); } else if ((ch == 'x' || ch == 'u') && len_mod == 'l') { i += c_itoa(buf + i, buf_size - i, va_arg(ap, unsigned long), ch == 'x' ? 16 : 10, flags, field_width); } else if ((ch == 'x' || ch == 'u') && len_mod == 'z') { i += c_itoa(buf + i, buf_size - i, va_arg(ap, size_t), ch == 'x' ? 16 : 10, flags, field_width); } else if (ch == 'p') { unsigned long num = (unsigned long) (uintptr_t) va_arg(ap, void *); C_SNPRINTF_APPEND_CHAR('0'); C_SNPRINTF_APPEND_CHAR('x'); i += c_itoa(buf + i, buf_size - i, num, 16, flags, 0); } else { #ifndef NO_LIBC /* * TODO(lsm): abort is not nice in a library, remove it * Also, ESP8266 SDK doesn't have it */ abort(); #endif } } } /* Zero-terminate the result */ if (buf_size > 0) { buf[i < (int) buf_size ? i : (int) buf_size - 1] = '\0'; } return i; } #endif int c_snprintf(char *buf, size_t buf_size, const char *fmt, ...) WEAK; int c_snprintf(char *buf, size_t buf_size, const char *fmt, ...) { int result; va_list ap; va_start(ap, fmt); result = c_vsnprintf(buf, buf_size, fmt, ap); va_end(ap); return result; } #ifdef _WIN32 int to_wchar(const char *path, wchar_t *wbuf, size_t wbuf_len) { int ret; char buf[MAX_PATH * 2], buf2[MAX_PATH * 2], *p; strncpy(buf, path, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; /* Trim trailing slashes. Leave backslash for paths like "X:\" */ p = buf + strlen(buf) - 1; while (p > buf && p[-1] != ':' && (p[0] == '\\' || p[0] == '/')) *p-- = '\0'; memset(wbuf, 0, wbuf_len * sizeof(wchar_t)); ret = MultiByteToWideChar(CP_UTF8, 0, buf, -1, wbuf, (int) wbuf_len); /* * Convert back to Unicode. If doubly-converted string does not match the * original, something is fishy, reject. */ WideCharToMultiByte(CP_UTF8, 0, wbuf, (int) wbuf_len, buf2, sizeof(buf2), NULL, NULL); if (strcmp(buf, buf2) != 0) { wbuf[0] = L'\0'; ret = 0; } return ret; } #endif /* _WIN32 */ /* The simplest O(mn) algorithm. 
Better implementation are GPLed */ const char *c_strnstr(const char *s, const char *find, size_t slen) WEAK; const char *c_strnstr(const char *s, const char *find, size_t slen) { size_t find_length = strlen(find); size_t i; for (i = 0; i < slen; i++) { if (i + find_length > slen) { return NULL; } if (strncmp(&s[i], find, find_length) == 0) { return &s[i]; } } return NULL; } #if CS_ENABLE_STRDUP char *strdup(const char *src) WEAK; char *strdup(const char *src) { size_t len = strlen(src) + 1; char *ret = MG_MALLOC(len); if (ret != NULL) { strcpy(ret, src); } return ret; } #endif void cs_to_hex(char *to, const unsigned char *p, size_t len) WEAK; void cs_to_hex(char *to, const unsigned char *p, size_t len) { static const char *hex = "0123456789abcdef"; for (; len--; p++) { *to++ = hex[p[0] >> 4]; *to++ = hex[p[0] & 0x0f]; } *to = '\0'; } static int fourbit(int ch) { if (ch >= '0' && ch <= '9') { return ch - '0'; } else if (ch >= 'a' && ch <= 'f') { return ch - 'a' + 10; } else if (ch >= 'A' && ch <= 'F') { return ch - 'A' + 10; } return 0; } void cs_from_hex(char *to, const char *p, size_t len) WEAK; void cs_from_hex(char *to, const char *p, size_t len) { size_t i; for (i = 0; i < len; i += 2) { *to++ = (fourbit(p[i]) << 4) + fourbit(p[i + 1]); } *to = '\0'; } #if CS_ENABLE_TO64 int64_t cs_to64(const char *s) WEAK; int64_t cs_to64(const char *s) { int64_t result = 0; int64_t neg = 1; while (*s && isspace((unsigned char) *s)) s++; if (*s == '-') { neg = -1; s++; } while (isdigit((unsigned char) *s)) { result *= 10; result += (*s - '0'); s++; } return result * neg; } #endif static int str_util_lowercase(const char *s) { return tolower(*(const unsigned char *) s); } int mg_ncasecmp(const char *s1, const char *s2, size_t len) WEAK; int mg_ncasecmp(const char *s1, const char *s2, size_t len) { int diff = 0; if (len > 0) do { diff = str_util_lowercase(s1++) - str_util_lowercase(s2++); } while (diff == 0 && s1[-1] != '\0' && --len > 0); return diff; } int mg_casecmp(const 
char *s1, const char *s2) WEAK; int mg_casecmp(const char *s1, const char *s2) { return mg_ncasecmp(s1, s2, (size_t) ~0); } int mg_asprintf(char **buf, size_t size, const char *fmt, ...) WEAK; int mg_asprintf(char **buf, size_t size, const char *fmt, ...) { int ret; va_list ap; va_start(ap, fmt); ret = mg_avprintf(buf, size, fmt, ap); va_end(ap); return ret; } int mg_avprintf(char **buf, size_t size, const char *fmt, va_list ap) WEAK; int mg_avprintf(char **buf, size_t size, const char *fmt, va_list ap) { va_list ap_copy; int len; va_copy(ap_copy, ap); len = vsnprintf(*buf, size, fmt, ap_copy); va_end(ap_copy); if (len < 0) { /* eCos and Windows are not standard-compliant and return -1 when * the buffer is too small. Keep allocating larger buffers until we * succeed or out of memory. */ *buf = NULL; /* LCOV_EXCL_START */ while (len < 0) { MG_FREE(*buf); size *= 2; if ((*buf = (char *) MG_MALLOC(size)) == NULL) break; va_copy(ap_copy, ap); len = vsnprintf(*buf, size, fmt, ap_copy); va_end(ap_copy); } /* LCOV_EXCL_STOP */ } else if (len >= (int) size) { /* Standard-compliant code path. Allocate a buffer that is large enough. */ if ((*buf = (char *) MG_MALLOC(len + 1)) == NULL) { len = -1; /* LCOV_EXCL_LINE */ } else { /* LCOV_EXCL_LINE */ va_copy(ap_copy, ap); len = vsnprintf(*buf, len + 1, fmt, ap_copy); va_end(ap_copy); } } return len; } #endif /* EXCLUDE_COMMON */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/tun.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_MONGOOSE_SRC_TUN_H_ #define CS_MONGOOSE_SRC_TUN_H_ #if MG_ENABLE_TUN /* Amalgamated: #include "mongoose/src/net.h" */ /* Amalgamated: #include "common/mg_str.h" */ #ifndef MG_TUN_RECONNECT_INTERVAL #define MG_TUN_RECONNECT_INTERVAL 1 #endif #define MG_TUN_PROTO_NAME "mg_tun" #define MG_TUN_DATA_FRAME 0x0 #define MG_TUN_F_END_STREAM 0x1 /* * MG TUN frame format is loosely based on HTTP/2. 
* However since the communication happens via WebSocket * there is no need to encode the frame length, since that's * solved by WebSocket framing. * * TODO(mkm): Detailed description of the protocol. */ struct mg_tun_frame { uint8_t type; uint8_t flags; uint32_t stream_id; /* opaque stream identifier */ struct mg_str body; }; struct mg_tun_ssl_opts { #if MG_ENABLE_SSL const char *ssl_cert; const char *ssl_key; const char *ssl_ca_cert; #else int dummy; /* some compilers don't like empty structs */ #endif }; struct mg_tun_client { struct mg_mgr *mgr; struct mg_iface *iface; const char *disp_url; struct mg_tun_ssl_opts ssl; uint32_t last_stream_id; /* stream id of most recently accepted connection */ struct mg_connection *disp; struct mg_connection *listener; struct mg_connection *reconnect; }; #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ struct mg_connection *mg_tun_bind_opt(struct mg_mgr *mgr, const char *dispatcher, MG_CB(mg_event_handler_t handler, void *user_data), struct mg_bind_opts opts); int mg_tun_parse_frame(void *data, size_t len, struct mg_tun_frame *frame); void mg_tun_send_frame(struct mg_connection *ws, uint32_t stream_id, uint8_t type, uint8_t flags, struct mg_str msg); void mg_tun_destroy_client(struct mg_tun_client *client); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* MG_ENABLE_TUN */ #endif /* CS_MONGOOSE_SRC_TUN_H_ */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/net.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved * * This software is dual-licensed: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. For the terms of this * license, see <http://www.gnu.org/licenses/>. * * You are free to use this software under the terms of the GNU General * Public License, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
* See the GNU General Public License for more details. * * Alternatively, you can license this software under a commercial * license, as set out in <https://www.cesanta.com/license>. */ /* Amalgamated: #include "common/cs_time.h" */ /* Amalgamated: #include "mongoose/src/dns.h" */ /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/resolv.h" */ /* Amalgamated: #include "mongoose/src/util.h" */ /* Amalgamated: #include "mongoose/src/tun.h" */ #define MG_MAX_HOST_LEN 200 #define MG_COPY_COMMON_CONNECTION_OPTIONS(dst, src) \ memcpy(dst, src, sizeof(*dst)); /* Which flags can be pre-set by the user at connection creation time. */ #define _MG_ALLOWED_CONNECT_FLAGS_MASK \ (MG_F_USER_1 | MG_F_USER_2 | MG_F_USER_3 | MG_F_USER_4 | MG_F_USER_5 | \ MG_F_USER_6 | MG_F_WEBSOCKET_NO_DEFRAG | MG_F_ENABLE_BROADCAST) /* Which flags should be modifiable by user's callbacks. */ #define _MG_CALLBACK_MODIFIABLE_FLAGS_MASK \ (MG_F_USER_1 | MG_F_USER_2 | MG_F_USER_3 | MG_F_USER_4 | MG_F_USER_5 | \ MG_F_USER_6 | MG_F_WEBSOCKET_NO_DEFRAG | MG_F_SEND_AND_CLOSE | \ MG_F_CLOSE_IMMEDIATELY | MG_F_IS_WEBSOCKET | MG_F_DELETE_CHUNK) #ifndef intptr_t #define intptr_t long #endif MG_INTERNAL void mg_add_conn(struct mg_mgr *mgr, struct mg_connection *c) { DBG(("%p %p", mgr, c)); c->mgr = mgr; c->next = mgr->active_connections; mgr->active_connections = c; c->prev = NULL; if (c->next != NULL) c->next->prev = c; if (c->sock != INVALID_SOCKET) { c->iface->vtable->add_conn(c); } } MG_INTERNAL void mg_remove_conn(struct mg_connection *conn) { if (conn->prev == NULL) conn->mgr->active_connections = conn->next; if (conn->prev) conn->prev->next = conn->next; if (conn->next) conn->next->prev = conn->prev; conn->prev = conn->next = NULL; conn->iface->vtable->remove_conn(conn); } MG_INTERNAL void mg_call(struct mg_connection *nc, mg_event_handler_t ev_handler, void *user_data, int ev, void *ev_data) { if (ev_handler == NULL) { /* * If protocol handler is specified, call 
it. Otherwise, call user-specified * event handler. */ ev_handler = nc->proto_handler ? nc->proto_handler : nc->handler; } if (ev != MG_EV_POLL) { DBG(("%p %s ev=%d ev_data=%p flags=%lu rmbl=%d smbl=%d", nc, ev_handler == nc->handler ? "user" : "proto", ev, ev_data, nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len)); } #if !defined(NO_LIBC) && MG_ENABLE_HEXDUMP /* LCOV_EXCL_START */ if (nc->mgr->hexdump_file != NULL && ev != MG_EV_POLL && ev != MG_EV_SEND /* handled separately */) { if (ev == MG_EV_RECV) { mg_hexdump_connection(nc, nc->mgr->hexdump_file, nc->recv_mbuf.buf, *(int *) ev_data, ev); } else { mg_hexdump_connection(nc, nc->mgr->hexdump_file, NULL, 0, ev); } } /* LCOV_EXCL_STOP */ #endif if (ev_handler != NULL) { unsigned long flags_before = nc->flags; size_t recv_mbuf_before = nc->recv_mbuf.len, recved; ev_handler(nc, ev, ev_data MG_UD_ARG(user_data)); recved = (recv_mbuf_before - nc->recv_mbuf.len); /* Prevent user handler from fiddling with system flags. */ if (ev_handler == nc->handler && nc->flags != flags_before) { nc->flags = (flags_before & ~_MG_CALLBACK_MODIFIABLE_FLAGS_MASK) | (nc->flags & _MG_CALLBACK_MODIFIABLE_FLAGS_MASK); } if (recved > 0 && !(nc->flags & MG_F_UDP)) { nc->iface->vtable->recved(nc, recved); } } if (ev != MG_EV_POLL) { DBG(("%p after %s flags=%lu rmbl=%d smbl=%d", nc, ev_handler == nc->handler ? "user" : "proto", nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len)); } #if !MG_ENABLE_CALLBACK_USERDATA (void) user_data; #endif } void mg_if_timer(struct mg_connection *c, double now) { if (c->ev_timer_time > 0 && now >= c->ev_timer_time) { double old_value = c->ev_timer_time; mg_call(c, NULL, c->user_data, MG_EV_TIMER, &now); /* * To prevent timer firing all the time, reset the timer after delivery. * However, in case user sets it to new value, do not reset. 
*/ if (c->ev_timer_time == old_value) { c->ev_timer_time = 0; } } } void mg_if_poll(struct mg_connection *nc, time_t now) { if (!(nc->flags & MG_F_SSL) || (nc->flags & MG_F_SSL_HANDSHAKE_DONE)) { mg_call(nc, NULL, nc->user_data, MG_EV_POLL, &now); } } static void mg_destroy_conn(struct mg_connection *conn, int destroy_if) { if (destroy_if) conn->iface->vtable->destroy_conn(conn); if (conn->proto_data != NULL && conn->proto_data_destructor != NULL) { conn->proto_data_destructor(conn->proto_data); } #if MG_ENABLE_SSL mg_ssl_if_conn_free(conn); #endif mbuf_free(&conn->recv_mbuf); mbuf_free(&conn->send_mbuf); memset(conn, 0, sizeof(*conn)); MG_FREE(conn); } void mg_close_conn(struct mg_connection *conn) { DBG(("%p %lu %d", conn, conn->flags, conn->sock)); #if MG_ENABLE_SSL if (conn->flags & MG_F_SSL_HANDSHAKE_DONE) { mg_ssl_if_conn_close_notify(conn); } #endif mg_remove_conn(conn); conn->iface->vtable->destroy_conn(conn); mg_call(conn, NULL, conn->user_data, MG_EV_CLOSE, NULL); mg_destroy_conn(conn, 0 /* destroy_if */); } void mg_mgr_init(struct mg_mgr *m, void *user_data) { struct mg_mgr_init_opts opts; memset(&opts, 0, sizeof(opts)); mg_mgr_init_opt(m, user_data, opts); } void mg_mgr_init_opt(struct mg_mgr *m, void *user_data, struct mg_mgr_init_opts opts) { memset(m, 0, sizeof(*m)); #if MG_ENABLE_BROADCAST m->ctl[0] = m->ctl[1] = INVALID_SOCKET; #endif m->user_data = user_data; #ifdef _WIN32 { WSADATA data; WSAStartup(MAKEWORD(2, 2), &data); } #elif defined(__unix__) /* Ignore SIGPIPE signal, so if client cancels the request, it * won't kill the whole process. 
*/ signal(SIGPIPE, SIG_IGN); #endif #if MG_ENABLE_SSL { static int init_done; if (!init_done) { mg_ssl_if_init(); init_done++; } } #endif { int i; if (opts.num_ifaces == 0) { opts.num_ifaces = mg_num_ifaces; opts.ifaces = mg_ifaces; } if (opts.main_iface != NULL) { opts.ifaces[MG_MAIN_IFACE] = opts.main_iface; } m->num_ifaces = opts.num_ifaces; m->ifaces = (struct mg_iface **) MG_MALLOC(sizeof(*m->ifaces) * opts.num_ifaces); for (i = 0; i < mg_num_ifaces; i++) { m->ifaces[i] = mg_if_create_iface(opts.ifaces[i], m); m->ifaces[i]->vtable->init(m->ifaces[i]); } } if (opts.nameserver != NULL) { m->nameserver = strdup(opts.nameserver); } DBG(("==================================")); DBG(("init mgr=%p", m)); } #if MG_ENABLE_JAVASCRIPT static enum v7_err mg_send_js(struct v7 *v7, v7_val_t *res) { v7_val_t arg0 = v7_arg(v7, 0); v7_val_t arg1 = v7_arg(v7, 1); struct mg_connection *c = (struct mg_connection *) v7_get_ptr(v7, arg0); size_t len = 0; if (v7_is_string(arg1)) { const char *data = v7_get_string(v7, &arg1, &len); mg_send(c, data, len); } *res = v7_mk_number(v7, len); return V7_OK; } enum v7_err mg_enable_javascript(struct mg_mgr *m, struct v7 *v7, const char *init_file_name) { v7_val_t v; m->v7 = v7; v7_set_method(v7, v7_get_global(v7), "mg_send", mg_send_js); return v7_exec_file(v7, init_file_name, &v); } #endif void mg_mgr_free(struct mg_mgr *m) { struct mg_connection *conn, *tmp_conn; DBG(("%p", m)); if (m == NULL) return; /* Do one last poll, see https://github.com/cesanta/mongoose/issues/286 */ mg_mgr_poll(m, 0); #if MG_ENABLE_BROADCAST if (m->ctl[0] != INVALID_SOCKET) closesocket(m->ctl[0]); if (m->ctl[1] != INVALID_SOCKET) closesocket(m->ctl[1]); m->ctl[0] = m->ctl[1] = INVALID_SOCKET; #endif for (conn = m->active_connections; conn != NULL; conn = tmp_conn) { tmp_conn = conn->next; mg_close_conn(conn); } { int i; for (i = 0; i < m->num_ifaces; i++) { m->ifaces[i]->vtable->free(m->ifaces[i]); MG_FREE(m->ifaces[i]); } MG_FREE(m->ifaces); } MG_FREE((char *) 
m->nameserver); } time_t mg_mgr_poll(struct mg_mgr *m, int timeout_ms) { int i; time_t now = 0; /* oh GCC, seriously ? */ if (m->num_ifaces == 0) { LOG(LL_ERROR, ("cannot poll: no interfaces")); return 0; } for (i = 0; i < m->num_ifaces; i++) { now = m->ifaces[i]->vtable->poll(m->ifaces[i], timeout_ms); } return now; } int mg_vprintf(struct mg_connection *nc, const char *fmt, va_list ap) { char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem; int len; if ((len = mg_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) { mg_send(nc, buf, len); } if (buf != mem && buf != NULL) { MG_FREE(buf); /* LCOV_EXCL_LINE */ } /* LCOV_EXCL_LINE */ return len; } int mg_printf(struct mg_connection *conn, const char *fmt, ...) { int len; va_list ap; va_start(ap, fmt); len = mg_vprintf(conn, fmt, ap); va_end(ap); return len; } #if MG_ENABLE_SYNC_RESOLVER /* TODO(lsm): use non-blocking resolver */ static int mg_resolve2(const char *host, struct in_addr *ina) { #if MG_ENABLE_GETADDRINFO int rv = 0; struct addrinfo hints, *servinfo, *p; struct sockaddr_in *h = NULL; memset(&hints, 0, sizeof hints); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; if ((rv = getaddrinfo(host, NULL, NULL, &servinfo)) != 0) { DBG(("getaddrinfo(%s) failed: %s", host, strerror(mg_get_errno()))); return 0; } for (p = servinfo; p != NULL; p = p->ai_next) { memcpy(&h, &p->ai_addr, sizeof(struct sockaddr_in *)); memcpy(ina, &h->sin_addr, sizeof(ina)); } freeaddrinfo(servinfo); return 1; #else struct hostent *he; if ((he = gethostbyname(host)) == NULL) { DBG(("gethostbyname(%s) failed: %s", host, strerror(mg_get_errno()))); } else { memcpy(ina, he->h_addr_list[0], sizeof(*ina)); return 1; } return 0; #endif /* MG_ENABLE_GETADDRINFO */ } int mg_resolve(const char *host, char *buf, size_t n) { struct in_addr ad; return mg_resolve2(host, &ad) ? 
snprintf(buf, n, "%s", inet_ntoa(ad)) : 0; } #endif /* MG_ENABLE_SYNC_RESOLVER */ MG_INTERNAL struct mg_connection *mg_create_connection_base( struct mg_mgr *mgr, mg_event_handler_t callback, struct mg_add_sock_opts opts) { struct mg_connection *conn; if ((conn = (struct mg_connection *) MG_CALLOC(1, sizeof(*conn))) != NULL) { conn->sock = INVALID_SOCKET; conn->handler = callback; conn->mgr = mgr; conn->last_io_time = (time_t) mg_time(); conn->iface = (opts.iface != NULL ? opts.iface : mgr->ifaces[MG_MAIN_IFACE]); conn->flags = opts.flags & _MG_ALLOWED_CONNECT_FLAGS_MASK; conn->user_data = opts.user_data; /* * SIZE_MAX is defined as a long long constant in * system headers on some platforms and so it * doesn't compile with pedantic ansi flags. */ conn->recv_mbuf_limit = ~0; } else { MG_SET_PTRPTR(opts.error_string, "failed to create connection"); } return conn; } MG_INTERNAL struct mg_connection *mg_create_connection( struct mg_mgr *mgr, mg_event_handler_t callback, struct mg_add_sock_opts opts) { struct mg_connection *conn = mg_create_connection_base(mgr, callback, opts); if (conn != NULL && !conn->iface->vtable->create_conn(conn)) { MG_FREE(conn); conn = NULL; } if (conn == NULL) { MG_SET_PTRPTR(opts.error_string, "failed to init connection"); } return conn; } /* * Address format: [PROTO://][HOST]:PORT * * HOST could be IPv4/IPv6 address or a host name. * `host` is a destination buffer to hold parsed HOST part. Should be at least * MG_MAX_HOST_LEN bytes long. * `proto` is a returned socket type, either SOCK_STREAM or SOCK_DGRAM * * Return: * -1 on parse error * 0 if HOST needs DNS lookup * >0 length of the address string */ MG_INTERNAL int mg_parse_address(const char *str, union socket_address *sa, int *proto, char *host, size_t host_len) { unsigned int a, b, c, d, port = 0; int ch, len = 0; #if MG_ENABLE_IPV6 char buf[100]; #endif /* * MacOS needs that. If we do not zero it, subsequent bind() will fail. 
* Also, all-zeroes in the socket address means binding to all addresses * for both IPv4 and IPv6 (INADDR_ANY and IN6ADDR_ANY_INIT). */ memset(sa, 0, sizeof(*sa)); sa->sin.sin_family = AF_INET; *proto = SOCK_STREAM; if (strncmp(str, "udp://", 6) == 0) { str += 6; *proto = SOCK_DGRAM; } else if (strncmp(str, "tcp://", 6) == 0) { str += 6; } if (sscanf(str, "%u.%u.%u.%u:%u%n", &a, &b, &c, &d, &port, &len) == 5) { /* Bind to a specific IPv4 address, e.g. 192.168.1.5:8080 */ sa->sin.sin_addr.s_addr = htonl(((uint32_t) a << 24) | ((uint32_t) b << 16) | c << 8 | d); sa->sin.sin_port = htons((uint16_t) port); #if MG_ENABLE_IPV6 } else if (sscanf(str, "[%99[^]]]:%u%n", buf, &port, &len) == 2 && inet_pton(AF_INET6, buf, &sa->sin6.sin6_addr)) { /* IPv6 address, e.g. [3ffe:2a00:100:7031::1]:8080 */ sa->sin6.sin6_family = AF_INET6; sa->sin.sin_port = htons((uint16_t) port); #endif #if MG_ENABLE_ASYNC_RESOLVER } else if (strlen(str) < host_len && sscanf(str, "%[^ :]:%u%n", host, &port, &len) == 2) { sa->sin.sin_port = htons((uint16_t) port); if (mg_resolve_from_hosts_file(host, sa) != 0) { /* * if resolving from hosts file failed and the host * we are trying to resolve is `localhost` - we should * try to resolve it using `gethostbyname` and do not try * to resolve it via DNS server if gethostbyname has failed too */ if (mg_ncasecmp(host, "localhost", 9) != 0) { return 0; } #if MG_ENABLE_SYNC_RESOLVER if (!mg_resolve2(host, &sa->sin.sin_addr)) { return -1; } #else return -1; #endif } #endif } else if (sscanf(str, ":%u%n", &port, &len) == 1 || sscanf(str, "%u%n", &port, &len) == 1) { /* If only port is specified, bind to IPv4, INADDR_ANY */ sa->sin.sin_port = htons((uint16_t) port); } else { return -1; } /* Required for MG_ENABLE_ASYNC_RESOLVER=0 */ (void) host; (void) host_len; ch = str[len]; /* Character that follows the address */ return port < 0xffffUL && (ch == '\0' || ch == ',' || isspace(ch)) ? 
len : -1; } struct mg_connection *mg_if_accept_new_conn(struct mg_connection *lc) { struct mg_add_sock_opts opts; struct mg_connection *nc; memset(&opts, 0, sizeof(opts)); nc = mg_create_connection(lc->mgr, lc->handler, opts); if (nc == NULL) return NULL; nc->listener = lc; nc->proto_handler = lc->proto_handler; nc->user_data = lc->user_data; nc->recv_mbuf_limit = lc->recv_mbuf_limit; nc->iface = lc->iface; if (lc->flags & MG_F_SSL) nc->flags |= MG_F_SSL; mg_add_conn(nc->mgr, nc); DBG(("%p %p %d %d", lc, nc, nc->sock, (int) nc->flags)); return nc; } void mg_if_accept_tcp_cb(struct mg_connection *nc, union socket_address *sa, size_t sa_len) { (void) sa_len; nc->sa = *sa; mg_call(nc, NULL, nc->user_data, MG_EV_ACCEPT, &nc->sa); } void mg_send(struct mg_connection *nc, const void *buf, int len) { nc->last_io_time = (time_t) mg_time(); if (nc->flags & MG_F_UDP) { nc->iface->vtable->udp_send(nc, buf, len); } else { nc->iface->vtable->tcp_send(nc, buf, len); } #if !defined(NO_LIBC) && MG_ENABLE_HEXDUMP if (nc->mgr && nc->mgr->hexdump_file != NULL) { mg_hexdump_connection(nc, nc->mgr->hexdump_file, buf, len, MG_EV_SEND); } #endif } void mg_if_sent_cb(struct mg_connection *nc, int num_sent) { if (num_sent < 0) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; } mg_call(nc, NULL, nc->user_data, MG_EV_SEND, &num_sent); } MG_INTERNAL void mg_recv_common(struct mg_connection *nc, void *buf, int len, int own) { DBG(("%p %d %u", nc, len, (unsigned int) nc->recv_mbuf.len)); if (nc->flags & MG_F_CLOSE_IMMEDIATELY) { DBG(("%p discarded %d bytes", nc, len)); /* * This connection will not survive next poll. Do not deliver events, * send data to /dev/null without acking. */ if (own) { MG_FREE(buf); } return; } nc->last_io_time = (time_t) mg_time(); if (!own) { mbuf_append(&nc->recv_mbuf, buf, len); } else if (nc->recv_mbuf.len == 0) { /* Adopt buf as recv_mbuf's backing store. 
*/ mbuf_free(&nc->recv_mbuf); nc->recv_mbuf.buf = (char *) buf; nc->recv_mbuf.size = nc->recv_mbuf.len = len; } else { mbuf_append(&nc->recv_mbuf, buf, len); MG_FREE(buf); } mg_call(nc, NULL, nc->user_data, MG_EV_RECV, &len); } void mg_if_recv_tcp_cb(struct mg_connection *nc, void *buf, int len, int own) { mg_recv_common(nc, buf, len, own); } void mg_if_recv_udp_cb(struct mg_connection *nc, void *buf, int len, union socket_address *sa, size_t sa_len) { assert(nc->flags & MG_F_UDP); DBG(("%p %u", nc, (unsigned int) len)); if (nc->flags & MG_F_LISTENING) { struct mg_connection *lc = nc; /* * Do we have an existing connection for this source? * This is very inefficient for long connection lists. */ for (nc = mg_next(lc->mgr, NULL); nc != NULL; nc = mg_next(lc->mgr, nc)) { if (memcmp(&nc->sa.sa, &sa->sa, sa_len) == 0 && nc->listener == lc) { break; } } if (nc == NULL) { struct mg_add_sock_opts opts; memset(&opts, 0, sizeof(opts)); /* Create fake connection w/out sock initialization */ nc = mg_create_connection_base(lc->mgr, lc->handler, opts); if (nc != NULL) { nc->sock = lc->sock; nc->listener = lc; nc->sa = *sa; nc->proto_handler = lc->proto_handler; nc->user_data = lc->user_data; nc->recv_mbuf_limit = lc->recv_mbuf_limit; nc->flags = MG_F_UDP; /* * Long-lived UDP "connections" i.e. interactions that involve more * than one request and response are rare, most are transactional: * response is sent and the "connection" is closed. Or - should be. * But users (including ourselves) tend to forget about that part, * because UDP is connectionless and one does not think about * processing a UDP request as handling a connection that needs to be * closed. Thus, we begin with SEND_AND_CLOSE flag set, which should * be a reasonable default for most use cases, but it is possible to * turn it off the connection should be kept alive after processing. 
*/ nc->flags |= MG_F_SEND_AND_CLOSE; mg_add_conn(lc->mgr, nc); mg_call(nc, NULL, nc->user_data, MG_EV_ACCEPT, &nc->sa); } else { DBG(("OOM")); /* No return here, we still need to drop on the floor */ } } } if (nc != NULL) { mg_recv_common(nc, buf, len, 1); } else { /* Drop on the floor. */ MG_FREE(buf); nc->iface->vtable->recved(nc, len); } } /* * Schedules an async connect for a resolved address and proto. * Called from two places: `mg_connect_opt()` and from async resolver. * When called from the async resolver, it must trigger `MG_EV_CONNECT` event * with a failure flag to indicate connection failure. */ MG_INTERNAL struct mg_connection *mg_do_connect(struct mg_connection *nc, int proto, union socket_address *sa) { DBG(("%p %s://%s:%hu", nc, proto == SOCK_DGRAM ? "udp" : "tcp", inet_ntoa(sa->sin.sin_addr), ntohs(sa->sin.sin_port))); nc->flags |= MG_F_CONNECTING; if (proto == SOCK_DGRAM) { nc->iface->vtable->connect_udp(nc); } else { nc->iface->vtable->connect_tcp(nc, sa); } mg_add_conn(nc->mgr, nc); return nc; } void mg_if_connect_cb(struct mg_connection *nc, int err) { DBG(("%p connect, err=%d", nc, err)); nc->flags &= ~MG_F_CONNECTING; if (err != 0) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; } mg_call(nc, NULL, nc->user_data, MG_EV_CONNECT, &err); } #if MG_ENABLE_ASYNC_RESOLVER /* * Callback for the async resolver on mg_connect_opt() call. * Main task of this function is to trigger MG_EV_CONNECT event with * either failure (and dealloc the connection) * or success (and proceed with connect() */ static void resolve_cb(struct mg_dns_message *msg, void *data, enum mg_resolve_err e) { struct mg_connection *nc = (struct mg_connection *) data; int i; int failure = -1; nc->flags &= ~MG_F_RESOLVING; if (msg != NULL) { /* * Take the first DNS A answer and run... */ for (i = 0; i < msg->num_answers; i++) { if (msg->answers[i].rtype == MG_DNS_A_RECORD) { /* * Async resolver guarantees that there is at least one answer. 
 * TODO(lsm): handle IPv6 answers too */
        mg_dns_parse_record_data(msg, &msg->answers[i], &nc->sa.sin.sin_addr,
                                 4);
        /* Port was already filled in by mg_parse_address(). */
        mg_do_connect(nc, nc->flags & MG_F_UDP ? SOCK_DGRAM : SOCK_STREAM,
                      &nc->sa);
        return;
      }
    }
  }
  if (e == MG_RESOLVE_TIMEOUT) {
    double now = mg_time();
    mg_call(nc, NULL, nc->user_data, MG_EV_TIMER, &now);
  }
  /*
   * If we get there was no MG_DNS_A_RECORD in the answer
   */
  mg_call(nc, NULL, nc->user_data, MG_EV_CONNECT, &failure);
  mg_call(nc, NULL, nc->user_data, MG_EV_CLOSE, NULL);
  mg_destroy_conn(nc, 1 /* destroy_if */);
}
#endif

/* Connect to `address` with default options. */
struct mg_connection *mg_connect(struct mg_mgr *mgr, const char *address,
                                 MG_CB(mg_event_handler_t callback,
                                       void *user_data)) {
  struct mg_connect_opts opts;
  memset(&opts, 0, sizeof(opts));
  return mg_connect_opt(mgr, address, MG_CB(callback, user_data), opts);
}

/*
 * Create an outbound connection. If the address is a host name, a DNS
 * lookup is scheduled asynchronously; otherwise connect starts right away.
 * Returns NULL on failure and sets opts.error_string if provided.
 */
struct mg_connection *mg_connect_opt(struct mg_mgr *mgr, const char *address,
                                     MG_CB(mg_event_handler_t callback,
                                           void *user_data),
                                     struct mg_connect_opts opts) {
  struct mg_connection *nc = NULL;
  int proto, rc;
  struct mg_add_sock_opts add_sock_opts;
  char host[MG_MAX_HOST_LEN];
  MG_COPY_COMMON_CONNECTION_OPTIONS(&add_sock_opts, &opts);
  if ((nc = mg_create_connection(mgr, callback, add_sock_opts)) == NULL) {
    return NULL;
  }
  /* rc == 0 means host needs DNS lookup; rc > 0 means resolved literal. */
  if ((rc = mg_parse_address(address, &nc->sa, &proto, host, sizeof(host))) <
      0) {
    /* Address is malformed */
    MG_SET_PTRPTR(opts.error_string, "cannot parse address");
    mg_destroy_conn(nc, 1 /* destroy_if */);
    return NULL;
  }
  nc->flags |= opts.flags & _MG_ALLOWED_CONNECT_FLAGS_MASK;
  nc->flags |= (proto == SOCK_DGRAM) ? MG_F_UDP : 0;
#if MG_ENABLE_CALLBACK_USERDATA
  nc->user_data = user_data;
#else
  nc->user_data = opts.user_data;
#endif

#if MG_ENABLE_SSL
  DBG(("%p %s %s,%s,%s", nc, address, (opts.ssl_cert ? opts.ssl_cert : "-"),
       (opts.ssl_key ? opts.ssl_key : "-"),
       (opts.ssl_ca_cert ? opts.ssl_ca_cert : "-")));
  if (opts.ssl_cert != NULL || opts.ssl_ca_cert != NULL ||
      opts.ssl_psk_identity != NULL) {
    const char *err_msg = NULL;
    struct mg_ssl_if_conn_params params;
    if (nc->flags & MG_F_UDP) {
      MG_SET_PTRPTR(opts.error_string, "SSL for UDP is not supported");
      mg_destroy_conn(nc, 1 /* destroy_if */);
      return NULL;
    }
    memset(&params, 0, sizeof(params));
    params.cert = opts.ssl_cert;
    params.key = opts.ssl_key;
    params.ca_cert = opts.ssl_ca_cert;
    params.cipher_suites = opts.ssl_cipher_suites;
    params.psk_identity = opts.ssl_psk_identity;
    params.psk_key = opts.ssl_psk_key;
    if (opts.ssl_ca_cert != NULL) {
      if (opts.ssl_server_name != NULL) {
        /* "*" disables server name verification. */
        if (strcmp(opts.ssl_server_name, "*") != 0) {
          params.server_name = opts.ssl_server_name;
        }
      } else if (rc == 0) { /* If it's a DNS name, use host. */
        params.server_name = host;
      }
    }
    if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
      MG_SET_PTRPTR(opts.error_string, err_msg);
      mg_destroy_conn(nc, 1 /* destroy_if */);
      return NULL;
    }
    nc->flags |= MG_F_SSL;
  }
#endif /* MG_ENABLE_SSL */

  if (rc == 0) {
#if MG_ENABLE_ASYNC_RESOLVER
    /*
     * DNS resolution is required for host.
     * mg_parse_address() fills port in nc->sa, which we pass to resolve_cb()
     */
    struct mg_connection *dns_conn = NULL;
    struct mg_resolve_async_opts o;
    memset(&o, 0, sizeof(o));
    o.dns_conn = &dns_conn;
    o.nameserver = opts.nameserver;
    if (mg_resolve_async_opt(nc->mgr, host, MG_DNS_A_RECORD, resolve_cb, nc,
                             o) != 0) {
      MG_SET_PTRPTR(opts.error_string, "cannot schedule DNS lookup");
      mg_destroy_conn(nc, 1 /* destroy_if */);
      return NULL;
    }
    nc->priv_2 = dns_conn;
    nc->flags |= MG_F_RESOLVING;
    return nc;
#else
    MG_SET_PTRPTR(opts.error_string, "Resolver is disabled");
    mg_destroy_conn(nc, 1 /* destroy_if */);
    return NULL;
#endif
  } else {
    /* Address is parsed and resolved to IP. proceed with connect() */
    return mg_do_connect(nc, proto, &nc->sa);
  }
}

/* Listen on `address` with default options. */
struct mg_connection *mg_bind(struct mg_mgr *srv, const char *address,
                              MG_CB(mg_event_handler_t event_handler,
                                    void *user_data)) {
  struct mg_bind_opts opts;
  memset(&opts, 0, sizeof(opts));
  return mg_bind_opt(srv, address, MG_CB(event_handler, user_data), opts);
}

/*
 * Create a listening connection. Returns NULL on failure and sets
 * opts.error_string if provided.
 */
struct mg_connection *mg_bind_opt(struct mg_mgr *mgr, const char *address,
                                  MG_CB(mg_event_handler_t callback,
                                        void *user_data),
                                  struct mg_bind_opts opts) {
  union socket_address sa;
  struct mg_connection *nc = NULL;
  int proto, rc;
  struct mg_add_sock_opts add_sock_opts;
  char host[MG_MAX_HOST_LEN];
  MG_COPY_COMMON_CONNECTION_OPTIONS(&add_sock_opts, &opts);
#if MG_ENABLE_TUN
  /* ws:// and wss:// listeners are handled by the tunnel interface. */
  if (mg_strncmp(mg_mk_str(address), mg_mk_str("ws://"), 5) == 0 ||
      mg_strncmp(mg_mk_str(address), mg_mk_str("wss://"), 6) == 0) {
    return mg_tun_bind_opt(mgr, address, MG_CB(callback, user_data), opts);
  }
#endif
  if (mg_parse_address(address, &sa, &proto, host, sizeof(host)) <= 0) {
    MG_SET_PTRPTR(opts.error_string, "cannot parse address");
    return NULL;
  }
  nc = mg_create_connection(mgr, callback, add_sock_opts);
  if (nc == NULL) {
    return NULL;
  }
  nc->sa = sa;
  nc->flags |= MG_F_LISTENING;
  if (proto == SOCK_DGRAM) nc->flags |= MG_F_UDP;

#if MG_ENABLE_SSL
  DBG(("%p %s %s,%s,%s", nc, address, (opts.ssl_cert ? opts.ssl_cert : "-"),
       (opts.ssl_key ? opts.ssl_key : "-"),
       (opts.ssl_ca_cert ?
opts.ssl_ca_cert : "-")));
  if (opts.ssl_cert != NULL || opts.ssl_ca_cert != NULL) {
    const char *err_msg = NULL;
    struct mg_ssl_if_conn_params params;
    if (nc->flags & MG_F_UDP) {
      MG_SET_PTRPTR(opts.error_string, "SSL for UDP is not supported");
      mg_destroy_conn(nc, 1 /* destroy_if */);
      return NULL;
    }
    memset(&params, 0, sizeof(params));
    params.cert = opts.ssl_cert;
    params.key = opts.ssl_key;
    params.ca_cert = opts.ssl_ca_cert;
    params.cipher_suites = opts.ssl_cipher_suites;
    if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) {
      MG_SET_PTRPTR(opts.error_string, err_msg);
      mg_destroy_conn(nc, 1 /* destroy_if */);
      return NULL;
    }
    nc->flags |= MG_F_SSL;
  }
#endif /* MG_ENABLE_SSL */
  if (nc->flags & MG_F_UDP) {
    rc = nc->iface->vtable->listen_udp(nc, &nc->sa);
  } else {
    rc = nc->iface->vtable->listen_tcp(nc, &nc->sa);
  }
  if (rc != 0) {
    DBG(("Failed to open listener: %d", rc));
    MG_SET_PTRPTR(opts.error_string, "failed to open listener");
    mg_destroy_conn(nc, 1 /* destroy_if */);
    return NULL;
  }
  mg_add_conn(nc->mgr, nc);
#if MG_ENABLE_CALLBACK_USERDATA
  (void) user_data;
#endif
  return nc;
}

/* Iterate active connections; pass NULL to start. */
struct mg_connection *mg_next(struct mg_mgr *s, struct mg_connection *conn) {
  return conn == NULL ? s->active_connections : conn->next;
}

#if MG_ENABLE_BROADCAST
void mg_broadcast(struct mg_mgr *mgr, mg_event_handler_t cb, void *data,
                  size_t len) {
  struct ctl_msg ctl_msg;
  /*
   * Mongoose manager has a socketpair, `struct mg_mgr::ctl`,
   * where `mg_broadcast()` pushes the message.
   * `mg_mgr_poll()` wakes up, reads a message from the socket pair, and calls
   * specified callback for each connection. Thus the callback function
   * executes in event manager thread.
   */
  if (mgr->ctl[0] != INVALID_SOCKET && data != NULL &&
      len < sizeof(ctl_msg.message)) {
    size_t dummy;
    ctl_msg.callback = cb;
    memcpy(ctl_msg.message, data, len);
    dummy = MG_SEND_FUNC(mgr->ctl[0], (char *) &ctl_msg,
                         offsetof(struct ctl_msg, message) + len, 0);
    /* Wait for the poll thread to ack the message (1-byte reply). */
    dummy = MG_RECV_FUNC(mgr->ctl[0], (char *) &len, 1, 0);
    (void) dummy; /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=25509 */
  }
}
#endif /* MG_ENABLE_BROADCAST */

/* Return non-zero if n fits in an unsigned byte. */
static int isbyte(int n) {
  return n >= 0 && n <= 255;
}

/* Parse "a.b.c.d[/bits]" into a net/mask pair in host byte order.
 * Returns chars consumed, or 0 on parse failure. */
static int parse_net(const char *spec, uint32_t *net, uint32_t *mask) {
  int n, a, b, c, d, slash = 32, len = 0;
  if ((sscanf(spec, "%d.%d.%d.%d/%d%n", &a, &b, &c, &d, &slash, &n) == 5 ||
       sscanf(spec, "%d.%d.%d.%d%n", &a, &b, &c, &d, &n) == 4) &&
      isbyte(a) && isbyte(b) && isbyte(c) && isbyte(d) && slash >= 0 &&
      slash < 33) {
    len = n;
    *net =
        ((uint32_t) a << 24) | ((uint32_t) b << 16) | ((uint32_t) c << 8) | d;
    *mask = slash ? 0xffffffffU << (32 - slash) : 0;
  }
  return len;
}

/* Check remote_ip (host byte order) against a "+net,-net,..." ACL.
 * Returns 1 = allowed, 0 = denied, -1 = malformed ACL. */
int mg_check_ip_acl(const char *acl, uint32_t remote_ip) {
  int allowed, flag;
  uint32_t net, mask;
  struct mg_str vec;
  /* If any ACL is set, deny by default */
  allowed = (acl == NULL || *acl == '\0') ? '+' : '-';
  while ((acl = mg_next_comma_list_entry(acl, &vec, NULL)) != NULL) {
    flag = vec.p[0];
    if ((flag != '+' && flag != '-') ||
        parse_net(&vec.p[1], &net, &mask) == 0) {
      return -1;
    }
    /* Last matching rule wins. */
    if (net == (remote_ip & mask)) {
      allowed = flag;
    }
  }
  DBG(("%08x %c", remote_ip, allowed));
  return allowed == '+';
}

/* Move data from one connection to another */
void mg_forward(struct mg_connection *from, struct mg_connection *to) {
  mg_send(to, from->recv_mbuf.buf, from->recv_mbuf.len);
  mbuf_remove(&from->recv_mbuf, from->recv_mbuf.len);
}

/* Set the connection's event timer; returns the previous value. */
double mg_set_timer(struct mg_connection *c, double timestamp) {
  double result = c->ev_timer_time;
  c->ev_timer_time = timestamp;
  /*
   * If this connection is resolving, it's not in the list of active
   * connections, so not processed yet. It has a DNS resolver connection
   * linked to it. Set up a timer for the DNS connection.
   */
  DBG(("%p %p %d -> %lu", c, c->priv_2, c->flags & MG_F_RESOLVING,
       (unsigned long) timestamp));
  if ((c->flags & MG_F_RESOLVING) && c->priv_2 != NULL) {
    ((struct mg_connection *) c->priv_2)->ev_timer_time = timestamp;
  }
  return result;
}

void mg_sock_set(struct mg_connection *nc, sock_t sock) {
  if (sock != INVALID_SOCKET) {
    nc->iface->vtable->sock_set(nc, sock);
  }
}

void mg_if_get_conn_addr(struct mg_connection *nc, int remote,
                         union socket_address *sa) {
  nc->iface->vtable->get_conn_addr(nc, remote, sa);
}

/* Wrap an existing socket in a connection object. */
struct mg_connection *mg_add_sock_opt(struct mg_mgr *s, sock_t sock,
                                      MG_CB(mg_event_handler_t callback,
                                            void *user_data),
                                      struct mg_add_sock_opts opts) {
#if MG_ENABLE_CALLBACK_USERDATA
  opts.user_data = user_data;
#endif
  struct mg_connection *nc = mg_create_connection_base(s, callback, opts);
  if (nc != NULL) {
    mg_sock_set(nc, sock);
    mg_add_conn(nc->mgr, nc);
  }
  return nc;
}

struct mg_connection *mg_add_sock(struct mg_mgr *s, sock_t sock,
                                  MG_CB(mg_event_handler_t callback,
                                        void *user_data)) {
  struct mg_add_sock_opts opts;
  memset(&opts, 0, sizeof(opts));
  return mg_add_sock_opt(s, sock, MG_CB(callback, user_data), opts);
}

double mg_time(void) {
  return cs_time();
}
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_socket.h"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#ifndef CS_MONGOOSE_SRC_NET_IF_SOCKET_H_
#define CS_MONGOOSE_SRC_NET_IF_SOCKET_H_

/* Amalgamated: #include "mongoose/src/net_if.h" */

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

#ifndef MG_ENABLE_NET_IF_SOCKET
#define MG_ENABLE_NET_IF_SOCKET MG_NET_IF == MG_NET_IF_SOCKET
#endif

extern const struct mg_iface_vtable mg_socket_iface_vtable;

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* CS_MONGOOSE_SRC_NET_IF_SOCKET_H_ */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_tun.h"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#ifndef CS_MONGOOSE_SRC_NET_IF_TUN_H_
#define CS_MONGOOSE_SRC_NET_IF_TUN_H_

#if MG_ENABLE_TUN

/* Amalgamated: #include "mongoose/src/net_if.h" */

struct mg_tun_client;

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

extern const struct mg_iface_vtable mg_tun_iface_vtable;

struct mg_connection *mg_tun_if_find_conn(struct mg_tun_client *client,
                                          uint32_t stream_id);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* MG_ENABLE_TUN */

#endif /* CS_MONGOOSE_SRC_NET_IF_TUN_H_ */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if.c"
#endif
/* Amalgamated: #include "mongoose/src/net_if.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/net_if_socket.h" */
/* Amalgamated: #include "mongoose/src/net_if_tun.h" */

extern const struct mg_iface_vtable mg_default_iface_vtable;

/* Built-in interface table; index 0 (MG_MAIN_IFACE) is the default. */
#if MG_ENABLE_TUN
const struct mg_iface_vtable *mg_ifaces[] = {&mg_default_iface_vtable,
                                             &mg_tun_iface_vtable};
#else
const struct mg_iface_vtable *mg_ifaces[] = {&mg_default_iface_vtable};
#endif
int mg_num_ifaces = (int) (sizeof(mg_ifaces) / sizeof(mg_ifaces[0]));

/* Allocate an iface instance bound to `vtable` and `mgr`. */
struct mg_iface *mg_if_create_iface(const struct mg_iface_vtable *vtable,
                                    struct mg_mgr *mgr) {
  struct mg_iface *iface = (struct mg_iface *) MG_CALLOC(1, sizeof(*iface));
  iface->mgr = mgr;
  iface->data = NULL;
  iface->vtable = vtable;
  return iface;
}

/* Find the next iface with `vtable`, searching after `from` (NULL = start). */
struct mg_iface *mg_find_iface(struct mg_mgr *mgr,
                               const struct mg_iface_vtable *vtable,
                               struct mg_iface *from) {
  int i = 0;
  if (from != NULL) {
    for (i = 0; i < mgr->num_ifaces; i++) {
      if (mgr->ifaces[i] == from) {
        i++;
        break;
      }
    }
  }
  for (; i < mgr->num_ifaces; i++) {
    if (mgr->ifaces[i]->vtable == vtable) {
      return mgr->ifaces[i];
    }
  }
  return NULL;
}
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/net_if_socket.c"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#if MG_ENABLE_NET_IF_SOCKET

/* Amalgamated: #include "mongoose/src/net_if_socket.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */

#define MG_TCP_RECV_BUFFER_SIZE 1024
#define MG_UDP_RECV_BUFFER_SIZE 1500

static sock_t mg_open_listening_socket(union socket_address *sa, int type,
                                       int proto);
#if MG_ENABLE_SSL
static void mg_ssl_begin(struct mg_connection *nc);
#endif

/* Put `sock` into non-blocking mode. */
void mg_set_non_blocking_mode(sock_t sock) {
#ifdef _WIN32
  unsigned long on = 1;
  ioctlsocket(sock, FIONBIO, &on);
#else
  int flags = fcntl(sock, F_GETFL, 0);
  fcntl(sock, F_SETFL, flags | O_NONBLOCK);
#endif
}

/* Return non-zero if `n` signals a hard socket error (not a would-block). */
static int mg_is_error(int n) {
  int err = mg_get_errno();
  return (n < 0 && err != EINPROGRESS && err != EWOULDBLOCK
#ifndef WINCE
          && err != EAGAIN && err != EINTR
#endif
#ifdef _WIN32
          && WSAGetLastError() != WSAEINTR &&
          WSAGetLastError() != WSAEWOULDBLOCK
#endif
          );
}

/* Start a non-blocking TCP connect; result is recorded in nc->err. */
void mg_socket_if_connect_tcp(struct mg_connection *nc,
                              const union socket_address *sa) {
  int rc, proto = 0;
  nc->sock = socket(AF_INET, SOCK_STREAM, proto);
  if (nc->sock == INVALID_SOCKET) {
    nc->err = mg_get_errno() ? mg_get_errno() : 1;
    return;
  }
#if !defined(MG_ESP8266)
  mg_set_non_blocking_mode(nc->sock);
#endif
  rc = connect(nc->sock, &sa->sa, sizeof(sa->sin));
  nc->err = mg_is_error(rc) ? mg_get_errno() : 0;
  DBG(("%p sock %d rc %d errno %d err %d", nc, nc->sock, rc, mg_get_errno(),
       nc->err));
}

/* Create the UDP socket; optionally enable broadcast. */
void mg_socket_if_connect_udp(struct mg_connection *nc) {
  nc->sock = socket(AF_INET, SOCK_DGRAM, 0);
  if (nc->sock == INVALID_SOCKET) {
    nc->err = mg_get_errno() ? mg_get_errno() : 1;
    return;
  }
  if (nc->flags & MG_F_ENABLE_BROADCAST) {
    int optval = 1;
    setsockopt(nc->sock, SOL_SOCKET, SO_BROADCAST, (const char *) &optval,
               sizeof(optval));
  }
  nc->err = 0;
}

int mg_socket_if_listen_tcp(struct mg_connection *nc,
                            union socket_address *sa) {
  int proto = 0;
  sock_t sock = mg_open_listening_socket(sa, SOCK_STREAM, proto);
  if (sock == INVALID_SOCKET) {
    return (mg_get_errno() ? mg_get_errno() : 1);
  }
  mg_sock_set(nc, sock);
  return 0;
}

int mg_socket_if_listen_udp(struct mg_connection *nc,
                            union socket_address *sa) {
  sock_t sock = mg_open_listening_socket(sa, SOCK_DGRAM, 0);
  if (sock == INVALID_SOCKET) return (mg_get_errno() ? mg_get_errno() : 1);
  mg_sock_set(nc, sock);
  return 0;
}

/* Sends only buffer the data; actual I/O happens in mg_write_to_socket(). */
void mg_socket_if_tcp_send(struct mg_connection *nc, const void *buf,
                           size_t len) {
  mbuf_append(&nc->send_mbuf, buf, len);
}

void mg_socket_if_udp_send(struct mg_connection *nc, const void *buf,
                           size_t len) {
  mbuf_append(&nc->send_mbuf, buf, len);
}

/* No flow control for plain sockets: ack is a no-op. */
void mg_socket_if_recved(struct mg_connection *nc, size_t len) {
  (void) nc;
  (void) len;
}

int mg_socket_if_create_conn(struct mg_connection *nc) {
  (void) nc;
  return 1;
}

void mg_socket_if_destroy_conn(struct mg_connection *nc) {
  if (nc->sock == INVALID_SOCKET) return;
  if (!(nc->flags & MG_F_UDP)) {
    closesocket(nc->sock);
  } else {
    /* Only close outgoing UDP sockets or listeners. */
    if (nc->listener == NULL) closesocket(nc->sock);
  }
  nc->sock = INVALID_SOCKET;
}

/* Accept one pending TCP connection on listener `lc`. Returns 1 if one
 * was accepted. */
static int mg_accept_conn(struct mg_connection *lc) {
  struct mg_connection *nc;
  union socket_address sa;
  socklen_t sa_len = sizeof(sa);
  /* NOTE(lsm): on Windows, sock is always > FD_SETSIZE */
  sock_t sock = accept(lc->sock, &sa.sa, &sa_len);
  if (sock == INVALID_SOCKET) {
    if (mg_is_error(-1)) DBG(("%p: failed to accept: %d", lc, mg_get_errno()));
    return 0;
  }
  nc = mg_if_accept_new_conn(lc);
  if (nc == NULL) {
    closesocket(sock);
    return 0;
  }
  DBG(("%p conn from %s:%d", nc, inet_ntoa(sa.sin.sin_addr),
       ntohs(sa.sin.sin_port)));
  mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
  if (lc->flags & MG_F_SSL) {
    if (mg_ssl_if_conn_accept(nc, lc) != MG_SSL_OK) mg_close_conn(nc);
  } else
#endif
  {
    mg_if_accept_tcp_cb(nc, &sa, sa_len);
  }
  return 1;
}

/* 'sa' must be an initialized address to bind to */
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
                                       int proto) {
  socklen_t sa_len = (sa->sa.sa_family == AF_INET) ?
sizeof(sa->sin) : sizeof(sa->sin6); sock_t sock = INVALID_SOCKET; #if !MG_LWIP int on = 1; #endif if ((sock = socket(sa->sa.sa_family, type, proto)) != INVALID_SOCKET && #if !MG_LWIP /* LWIP doesn't support either */ #if defined(_WIN32) && defined(SO_EXCLUSIVEADDRUSE) && !defined(WINCE) /* "Using SO_REUSEADDR and SO_EXCLUSIVEADDRUSE" http://goo.gl/RmrFTm */ !setsockopt(sock, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (void *) &on, sizeof(on)) && #endif #if !defined(_WIN32) || !defined(SO_EXCLUSIVEADDRUSE) /* * SO_RESUSEADDR is not enabled on Windows because the semantics of * SO_REUSEADDR on UNIX and Windows is different. On Windows, * SO_REUSEADDR allows to bind a socket to a port without error even if * the port is already open by another program. This is not the behavior * SO_REUSEADDR was designed for, and leads to hard-to-track failure * scenarios. Therefore, SO_REUSEADDR was disabled on Windows unless * SO_EXCLUSIVEADDRUSE is supported and set on a socket. */ !setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *) &on, sizeof(on)) && #endif #endif /* !MG_LWIP */ !bind(sock, &sa->sa, sa_len) && (type == SOCK_DGRAM || listen(sock, SOMAXCONN) == 0)) { #if !MG_LWIP mg_set_non_blocking_mode(sock); /* In case port was set to 0, get the real port number */ (void) getsockname(sock, &sa->sa, &sa_len); #endif } else if (sock != INVALID_SOCKET) { closesocket(sock); sock = INVALID_SOCKET; } return sock; } static void mg_write_to_socket(struct mg_connection *nc) { struct mbuf *io = &nc->send_mbuf; int n = 0; #if MG_LWIP /* With LWIP we don't know if the socket is ready */ if (io->len == 0) return; #endif assert(io->len > 0); if (nc->flags & MG_F_UDP) { int n = sendto(nc->sock, io->buf, io->len, 0, &nc->sa.sa, sizeof(nc->sa.sin)); DBG(("%p %d %d %d %s:%hu", nc, nc->sock, n, mg_get_errno(), inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port))); if (n > 0) { mbuf_remove(io, n); mg_if_sent_cb(nc, n); } return; } #if MG_ENABLE_SSL if (nc->flags & MG_F_SSL) { if (nc->flags & 
MG_F_SSL_HANDSHAKE_DONE) { n = mg_ssl_if_write(nc, io->buf, io->len); DBG(("%p %d bytes -> %d (SSL)", nc, n, nc->sock)); if (n < 0) { if (n != MG_SSL_WANT_READ && n != MG_SSL_WANT_WRITE) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; } return; } else { /* Successful SSL operation, clear off SSL wait flags */ nc->flags &= ~(MG_F_WANT_READ | MG_F_WANT_WRITE); } } else { mg_ssl_begin(nc); return; } } else #endif { n = (int) MG_SEND_FUNC(nc->sock, io->buf, io->len, 0); DBG(("%p %d bytes -> %d", nc, n, nc->sock)); if (n < 0 && mg_is_error(n)) { /* Something went wrong, drop the connection. */ nc->flags |= MG_F_CLOSE_IMMEDIATELY; return; } } if (n > 0) { mbuf_remove(io, n); mg_if_sent_cb(nc, n); } } MG_INTERNAL size_t recv_avail_size(struct mg_connection *conn, size_t max) { size_t avail; if (conn->recv_mbuf_limit < conn->recv_mbuf.len) return 0; avail = conn->recv_mbuf_limit - conn->recv_mbuf.len; return avail > max ? max : avail; } static void mg_handle_tcp_read(struct mg_connection *conn) { int n = 0; char *buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE); if (buf == NULL) { DBG(("OOM")); return; } #if MG_ENABLE_SSL if (conn->flags & MG_F_SSL) { if (conn->flags & MG_F_SSL_HANDSHAKE_DONE) { /* SSL library may have more bytes ready to read than we ask to read. * Therefore, read in a loop until we read everything. Without the loop, * we skip to the next select() cycle which can just timeout. */ while ((n = mg_ssl_if_read(conn, buf, MG_TCP_RECV_BUFFER_SIZE)) > 0) { DBG(("%p %d bytes <- %d (SSL)", conn, n, conn->sock)); mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */); buf = NULL; if (conn->flags & MG_F_CLOSE_IMMEDIATELY) break; /* buf has been freed, we need a new one. 
*/ buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE); if (buf == NULL) break; } MG_FREE(buf); if (n < 0 && n != MG_SSL_WANT_READ) conn->flags |= MG_F_CLOSE_IMMEDIATELY; } else { MG_FREE(buf); mg_ssl_begin(conn); return; } } else #endif { n = (int) MG_RECV_FUNC(conn->sock, buf, recv_avail_size(conn, MG_TCP_RECV_BUFFER_SIZE), 0); DBG(("%p %d bytes (PLAIN) <- %d", conn, n, conn->sock)); if (n > 0) { mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */); } else { MG_FREE(buf); } if (n == 0) { /* Orderly shutdown of the socket, try flushing output. */ conn->flags |= MG_F_SEND_AND_CLOSE; } else if (mg_is_error(n)) { conn->flags |= MG_F_CLOSE_IMMEDIATELY; } } } static int mg_recvfrom(struct mg_connection *nc, union socket_address *sa, socklen_t *sa_len, char **buf) { int n; *buf = (char *) MG_MALLOC(MG_UDP_RECV_BUFFER_SIZE); if (*buf == NULL) { DBG(("Out of memory")); return -ENOMEM; } n = recvfrom(nc->sock, *buf, MG_UDP_RECV_BUFFER_SIZE, 0, &sa->sa, sa_len); if (n <= 0) { DBG(("%p recvfrom: %s", nc, strerror(mg_get_errno()))); MG_FREE(*buf); } return n; } static void mg_handle_udp_read(struct mg_connection *nc) { char *buf = NULL; union socket_address sa; socklen_t sa_len = sizeof(sa); int n = mg_recvfrom(nc, &sa, &sa_len, &buf); DBG(("%p %d bytes from %s:%d", nc, n, inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port))); mg_if_recv_udp_cb(nc, buf, n, &sa, sa_len); } #if MG_ENABLE_SSL static void mg_ssl_begin(struct mg_connection *nc) { int server_side = (nc->listener != NULL); enum mg_ssl_if_result res = mg_ssl_if_handshake(nc); DBG(("%p %d res %d", nc, server_side, res)); if (res == MG_SSL_OK) { nc->flags |= MG_F_SSL_HANDSHAKE_DONE; nc->flags &= ~(MG_F_WANT_READ | MG_F_WANT_WRITE); if (server_side) { union socket_address sa; socklen_t sa_len = sizeof(sa); (void) getpeername(nc->sock, &sa.sa, &sa_len); mg_if_accept_tcp_cb(nc, &sa, sa_len); } else { mg_if_connect_cb(nc, 0); } } else if (res != MG_SSL_WANT_READ && res != MG_SSL_WANT_WRITE) { if (!server_side) { 
/*
 * Service one connection based on the readiness flags computed by the
 * poll loop: finish a pending connect, read/accept when readable, flush
 * output when writable, then run the per-connection poll and timer hooks.
 *
 * nc       - connection to service
 * fd_flags - bitmask of _MG_F_FD_CAN_READ / _MG_F_FD_CAN_WRITE /
 *            _MG_F_FD_ERROR for nc's socket
 * now      - current time in seconds, forwarded to poll/timer callbacks
 */
void mg_mgr_handle_conn(struct mg_connection *nc, int fd_flags, double now) {
  /* Only log connections that have activity or are waiting for I/O. */
  int worth_logging =
      fd_flags != 0 || (nc->flags & (MG_F_WANT_READ | MG_F_WANT_WRITE));
  if (worth_logging) {
    DBG(("%p fd=%d fd_flags=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock,
         fd_flags, nc->flags, (int) nc->recv_mbuf.len,
         (int) nc->send_mbuf.len));
  }
  if (nc->flags & MG_F_CONNECTING) {
    if (fd_flags != 0) {
      int err = 0;
#if !defined(MG_ESP8266)
      if (!(nc->flags & MG_F_UDP)) {
        /* Any event on a connecting TCP socket means the non-blocking
         * connect attempt has resolved; fetch its status via SO_ERROR. */
        socklen_t len = sizeof(err);
        int ret =
            getsockopt(nc->sock, SOL_SOCKET, SO_ERROR, (char *) &err, &len);
        if (ret != 0) {
          /* Could not even query the status - treat as failure. */
          err = 1;
        } else if (err == EAGAIN || err == EWOULDBLOCK) {
          /* Still in progress - not an error. */
          err = 0;
        }
      }
#else
      /*
       * On ESP8266 we use blocking connect.
       */
      err = nc->err;
#endif
#if MG_ENABLE_SSL
      if ((nc->flags & MG_F_SSL) && err == 0) {
        /* TCP established; now run the SSL handshake before reporting. */
        mg_ssl_begin(nc);
      } else {
        mg_if_connect_cb(nc, err);
      }
#else
      mg_if_connect_cb(nc, err);
#endif
    } else if (nc->err != 0) {
      /* No fd event, but an error was recorded out-of-band. */
      mg_if_connect_cb(nc, nc->err);
    }
  }

  if (fd_flags & _MG_F_FD_CAN_READ) {
    if (nc->flags & MG_F_UDP) {
      mg_handle_udp_read(nc);
    } else {
      if (nc->flags & MG_F_LISTENING) {
        /*
         * We're not looping here, and accepting just one connection at
         * a time. The reason is that eCos does not respect non-blocking
         * flag on a listening socket and hangs in a loop.
         */
        mg_accept_conn(nc);
      } else {
        mg_handle_tcp_read(nc);
      }
    }
  }

  /* Skip write/poll/timer work if the read phase decided to kill nc. */
  if (!(nc->flags & MG_F_CLOSE_IMMEDIATELY)) {
    if ((fd_flags & _MG_F_FD_CAN_WRITE) && nc->send_mbuf.len > 0) {
      mg_write_to_socket(nc);
    }
    mg_if_poll(nc, (time_t) now);
    mg_if_timer(nc, now);
  }
  if (worth_logging) {
    DBG(("%p after fd=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock,
         nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));
  }
}
*/ void mg_socket_if_sock_set(struct mg_connection *nc, sock_t sock) { mg_set_non_blocking_mode(sock); mg_set_close_on_exec(sock); nc->sock = sock; DBG(("%p %d", nc, sock)); } void mg_socket_if_init(struct mg_iface *iface) { (void) iface; DBG(("%p using select()", iface->mgr)); #if MG_ENABLE_BROADCAST do { mg_socketpair(iface->mgr->ctl, SOCK_DGRAM); } while (iface->mgr->ctl[0] == INVALID_SOCKET); #endif } void mg_socket_if_free(struct mg_iface *iface) { (void) iface; } void mg_socket_if_add_conn(struct mg_connection *nc) { (void) nc; } void mg_socket_if_remove_conn(struct mg_connection *nc) { (void) nc; } void mg_add_to_set(sock_t sock, fd_set *set, sock_t *max_fd) { if (sock != INVALID_SOCKET #ifdef __unix__ && sock < (sock_t) FD_SETSIZE #endif ) { FD_SET(sock, set); if (*max_fd == INVALID_SOCKET || sock > *max_fd) { *max_fd = sock; } } } time_t mg_socket_if_poll(struct mg_iface *iface, int timeout_ms) { struct mg_mgr *mgr = iface->mgr; double now = mg_time(); double min_timer; struct mg_connection *nc, *tmp; struct timeval tv; fd_set read_set, write_set, err_set; sock_t max_fd = INVALID_SOCKET; int num_fds, num_ev, num_timers = 0; #ifdef __unix__ int try_dup = 1; #endif FD_ZERO(&read_set); FD_ZERO(&write_set); FD_ZERO(&err_set); #if MG_ENABLE_BROADCAST mg_add_to_set(mgr->ctl[1], &read_set, &max_fd); #endif /* * Note: it is ok to have connections with sock == INVALID_SOCKET in the list, * e.g. timer-only "connections". */ min_timer = 0; for (nc = mgr->active_connections, num_fds = 0; nc != NULL; nc = tmp) { tmp = nc->next; if (nc->sock != INVALID_SOCKET) { num_fds++; #ifdef __unix__ /* A hack to make sure all our file descriptos fit into FD_SETSIZE. 
*/ if (nc->sock >= (sock_t) FD_SETSIZE && try_dup) { int new_sock = dup(nc->sock); if (new_sock >= 0 && new_sock < (sock_t) FD_SETSIZE) { closesocket(nc->sock); DBG(("new sock %d -> %d", nc->sock, new_sock)); nc->sock = new_sock; } else { try_dup = 0; } } #endif if (!(nc->flags & MG_F_WANT_WRITE) && nc->recv_mbuf.len < nc->recv_mbuf_limit && (!(nc->flags & MG_F_UDP) || nc->listener == NULL)) { mg_add_to_set(nc->sock, &read_set, &max_fd); } if (((nc->flags & MG_F_CONNECTING) && !(nc->flags & MG_F_WANT_READ)) || (nc->send_mbuf.len > 0 && !(nc->flags & MG_F_CONNECTING))) { mg_add_to_set(nc->sock, &write_set, &max_fd); mg_add_to_set(nc->sock, &err_set, &max_fd); } } if (nc->ev_timer_time > 0) { if (num_timers == 0 || nc->ev_timer_time < min_timer) { min_timer = nc->ev_timer_time; } num_timers++; } } /* * If there is a timer to be fired earlier than the requested timeout, * adjust the timeout. */ if (num_timers > 0) { double timer_timeout_ms = (min_timer - mg_time()) * 1000 + 1 /* rounding */; if (timer_timeout_ms < timeout_ms) { timeout_ms = (int) timer_timeout_ms; } } if (timeout_ms < 0) timeout_ms = 0; tv.tv_sec = timeout_ms / 1000; tv.tv_usec = (timeout_ms % 1000) * 1000; num_ev = select((int) max_fd + 1, &read_set, &write_set, &err_set, &tv); now = mg_time(); #if 0 DBG(("select @ %ld num_ev=%d of %d, timeout=%d", (long) now, num_ev, num_fds, timeout_ms)); #endif #if MG_ENABLE_BROADCAST if (num_ev > 0 && mgr->ctl[1] != INVALID_SOCKET && FD_ISSET(mgr->ctl[1], &read_set)) { mg_mgr_handle_ctl_sock(mgr); } #endif for (nc = mgr->active_connections; nc != NULL; nc = tmp) { int fd_flags = 0; if (nc->sock != INVALID_SOCKET) { if (num_ev > 0) { fd_flags = (FD_ISSET(nc->sock, &read_set) && (!(nc->flags & MG_F_UDP) || nc->listener == NULL) ? _MG_F_FD_CAN_READ : 0) | (FD_ISSET(nc->sock, &write_set) ? _MG_F_FD_CAN_WRITE : 0) | (FD_ISSET(nc->sock, &err_set) ? 
/*
 * Emulate socketpair(2) over the loopback interface, for platforms that
 * lack it. Creates an anchor socket bound to 127.0.0.1:<ephemeral>,
 * connects sp[0] to it, and makes sp[1] the peer: for SOCK_STREAM the
 * accepted socket, for SOCK_DGRAM the anchor itself cross-connected back
 * to sp[0]. Returns 1 on success; on failure returns 0 with all sockets
 * closed and sp[] reset to INVALID_SOCKET.
 */
int mg_socketpair(sock_t sp[2], int sock_type) {
  union socket_address sa;
  sock_t sock;
  socklen_t len = sizeof(sa.sin);
  int ret = 0;

  sock = sp[0] = sp[1] = INVALID_SOCKET;

  (void) memset(&sa, 0, sizeof(sa));
  sa.sin.sin_family = AF_INET;
  /* Port 0: let the OS choose; getsockname() below recovers the choice. */
  sa.sin.sin_port = htons(0);
  sa.sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */

  /* Cascade of setup steps: each body is empty, so any failure simply
   * falls through to the cleanup block below with ret == 0. */
  if ((sock = socket(AF_INET, sock_type, 0)) == INVALID_SOCKET) {
  } else if (bind(sock, &sa.sa, len) != 0) {
  } else if (sock_type == SOCK_STREAM && listen(sock, 1) != 0) {
  } else if (getsockname(sock, &sa.sa, &len) != 0) {
  } else if ((sp[0] = socket(AF_INET, sock_type, 0)) == INVALID_SOCKET) {
  } else if (connect(sp[0], &sa.sa, len) != 0) {
  } else if (sock_type == SOCK_DGRAM &&
             /* For UDP, also connect the anchor back to sp[0] so both
              * ends have a fixed peer. */
             (getsockname(sp[0], &sa.sa, &len) != 0 ||
              connect(sock, &sa.sa, len) != 0)) {
  } else if ((sp[1] = (sock_type == SOCK_DGRAM ? sock
                                               : accept(sock, &sa.sa, &len))) ==
             INVALID_SOCKET) {
  } else {
    mg_set_close_on_exec(sp[0]);
    mg_set_close_on_exec(sp[1]);
    /* The listening anchor is only needed to produce the accepted end. */
    if (sock_type == SOCK_STREAM) closesocket(sock);
    ret = 1;
  }

  if (!ret) {
    if (sp[0] != INVALID_SOCKET) closesocket(sp[0]);
    if (sp[1] != INVALID_SOCKET) closesocket(sp[1]);
    if (sock != INVALID_SOCKET) closesocket(sock);
    sock = sp[0] = sp[1] = INVALID_SOCKET;
  }

  return ret;
}
Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/net_if_tun.h" */ /* Amalgamated: #include "mongoose/src/tun.h" */ /* Amalgamated: #include "mongoose/src/util.h" */ #define MG_TCP_RECV_BUFFER_SIZE 1024 #define MG_UDP_RECV_BUFFER_SIZE 1500 void mg_tun_if_connect_tcp(struct mg_connection *nc, const union socket_address *sa) { (void) nc; (void) sa; } void mg_tun_if_connect_udp(struct mg_connection *nc) { (void) nc; } int mg_tun_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) { (void) nc; (void) sa; return 0; } int mg_tun_if_listen_udp(struct mg_connection *nc, union socket_address *sa) { (void) nc; (void) sa; return -1; } void mg_tun_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) { struct mg_tun_client *client = (struct mg_tun_client *) nc->iface->data; uint32_t stream_id = (uint32_t)(uintptr_t) nc->mgr_data; struct mg_str msg = {(char *) buf, len}; #if MG_ENABLE_HEXDUMP char hex[512]; mg_hexdump(buf, len, hex, sizeof(hex)); LOG(LL_DEBUG, ("sending to stream %zu:\n%s", stream_id, hex)); #endif mg_tun_send_frame(client->disp, stream_id, MG_TUN_DATA_FRAME, 0, msg); } void mg_tun_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) { (void) nc; (void) buf; (void) len; } void mg_tun_if_recved(struct mg_connection *nc, size_t len) { (void) nc; (void) len; } int mg_tun_if_create_conn(struct mg_connection *nc) { (void) nc; return 1; } void mg_tun_if_destroy_conn(struct mg_connection *nc) { struct mg_tun_client *client = (struct mg_tun_client *) nc->iface->data; if (nc->flags & MG_F_LISTENING) { mg_tun_destroy_client(client); } else if (client->disp) { uint32_t stream_id = (uint32_t)(uintptr_t) nc->mgr_data; struct mg_str msg = {NULL, 0}; LOG(LL_DEBUG, ("closing %zu:", stream_id)); mg_tun_send_frame(client->disp, stream_id, MG_TUN_DATA_FRAME, MG_TUN_F_END_STREAM, msg); } } /* Associate a socket to a connection. 
*/ void mg_tun_if_sock_set(struct mg_connection *nc, sock_t sock) { (void) nc; (void) sock; } void mg_tun_if_init(struct mg_iface *iface) { (void) iface; } void mg_tun_if_free(struct mg_iface *iface) { (void) iface; } void mg_tun_if_add_conn(struct mg_connection *nc) { nc->sock = INVALID_SOCKET; } void mg_tun_if_remove_conn(struct mg_connection *nc) { (void) nc; } time_t mg_tun_if_poll(struct mg_iface *iface, int timeout_ms) { (void) iface; (void) timeout_ms; return (time_t) cs_time(); } void mg_tun_if_get_conn_addr(struct mg_connection *nc, int remote, union socket_address *sa) { (void) nc; (void) remote; (void) sa; } struct mg_connection *mg_tun_if_find_conn(struct mg_tun_client *client, uint32_t stream_id) { struct mg_connection *nc = NULL; for (nc = client->mgr->active_connections; nc != NULL; nc = nc->next) { if (nc->iface != client->iface || (nc->flags & MG_F_LISTENING)) { continue; } if (stream_id == (uint32_t)(uintptr_t) nc->mgr_data) { return nc; } } if (stream_id > client->last_stream_id) { /* create a new connection */ LOG(LL_DEBUG, ("new stream 0x%lx, accepting", stream_id)); nc = mg_if_accept_new_conn(client->listener); nc->mgr_data = (void *) (uintptr_t) stream_id; client->last_stream_id = stream_id; } else { LOG(LL_DEBUG, ("Ignoring stream 0x%lx (last_stream_id 0x%lx)", stream_id, client->last_stream_id)); } return nc; } /* clang-format off */ #define MG_TUN_IFACE_VTABLE \ { \ mg_tun_if_init, \ mg_tun_if_free, \ mg_tun_if_add_conn, \ mg_tun_if_remove_conn, \ mg_tun_if_poll, \ mg_tun_if_listen_tcp, \ mg_tun_if_listen_udp, \ mg_tun_if_connect_tcp, \ mg_tun_if_connect_udp, \ mg_tun_if_tcp_send, \ mg_tun_if_udp_send, \ mg_tun_if_recved, \ mg_tun_if_create_conn, \ mg_tun_if_destroy_conn, \ mg_tun_if_sock_set, \ mg_tun_if_get_conn_addr, \ } /* clang-format on */ const struct mg_iface_vtable mg_tun_iface_vtable = MG_TUN_IFACE_VTABLE; #endif /* MG_ENABLE_TUN */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/ssl_if_openssl.c" #endif /* * Copyright (c) 2014-2016 
Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_OPENSSL #ifdef __APPLE__ #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif #include <openssl/ssl.h> struct mg_ssl_if_ctx { SSL *ssl; SSL_CTX *ssl_ctx; struct mbuf psk; size_t identity_len; }; void mg_ssl_if_init() { SSL_library_init(); } enum mg_ssl_if_result mg_ssl_if_conn_accept(struct mg_connection *nc, struct mg_connection *lc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx)); struct mg_ssl_if_ctx *lc_ctx = (struct mg_ssl_if_ctx *) lc->ssl_if_data; nc->ssl_if_data = ctx; if (ctx == NULL || lc_ctx == NULL) return MG_SSL_ERROR; ctx->ssl_ctx = lc_ctx->ssl_ctx; if ((ctx->ssl = SSL_new(ctx->ssl_ctx)) == NULL) { return MG_SSL_ERROR; } return MG_SSL_OK; } static enum mg_ssl_if_result mg_use_cert(SSL_CTX *ctx, const char *cert, const char *key, const char **err_msg); static enum mg_ssl_if_result mg_use_ca_cert(SSL_CTX *ctx, const char *cert); static enum mg_ssl_if_result mg_set_cipher_list(SSL_CTX *ctx, const char *cl); static enum mg_ssl_if_result mg_ssl_if_ossl_set_psk(struct mg_ssl_if_ctx *ctx, const char *identity, const char *key_str); enum mg_ssl_if_result mg_ssl_if_conn_init( struct mg_connection *nc, const struct mg_ssl_if_conn_params *params, const char **err_msg) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx)); DBG(("%p %s,%s,%s", nc, (params->cert ? params->cert : ""), (params->key ? params->key : ""), (params->ca_cert ? 
params->ca_cert : ""))); if (ctx == NULL) { MG_SET_PTRPTR(err_msg, "Out of memory"); return MG_SSL_ERROR; } nc->ssl_if_data = ctx; if (nc->flags & MG_F_LISTENING) { ctx->ssl_ctx = SSL_CTX_new(SSLv23_server_method()); } else { ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method()); } if (ctx->ssl_ctx == NULL) { MG_SET_PTRPTR(err_msg, "Failed to create SSL context"); return MG_SSL_ERROR; } if (params->cert != NULL && mg_use_cert(ctx->ssl_ctx, params->cert, params->key, err_msg) != MG_SSL_OK) { return MG_SSL_ERROR; } if (params->ca_cert != NULL && mg_use_ca_cert(ctx->ssl_ctx, params->ca_cert) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid SSL CA cert"); return MG_SSL_ERROR; } if (params->server_name != NULL) { #ifdef KR_VERSION SSL_CTX_kr_set_verify_name(ctx->ssl_ctx, params->server_name); #else /* TODO(rojer): Implement server name verification on OpenSSL. */ #endif } if (mg_set_cipher_list(ctx->ssl_ctx, params->cipher_suites) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid cipher suite list"); return MG_SSL_ERROR; } mbuf_init(&ctx->psk, 0); if (mg_ssl_if_ossl_set_psk(ctx, params->psk_identity, params->psk_key) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid PSK settings"); return MG_SSL_ERROR; } if (!(nc->flags & MG_F_LISTENING) && (ctx->ssl = SSL_new(ctx->ssl_ctx)) == NULL) { MG_SET_PTRPTR(err_msg, "Failed to create SSL session"); return MG_SSL_ERROR; } nc->flags |= MG_F_SSL; return MG_SSL_OK; } static enum mg_ssl_if_result mg_ssl_if_ssl_err(struct mg_connection *nc, int res) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int err = SSL_get_error(ctx->ssl, res); if (err == SSL_ERROR_WANT_READ) return MG_SSL_WANT_READ; if (err == SSL_ERROR_WANT_WRITE) return MG_SSL_WANT_WRITE; DBG(("%p %p SSL error: %d %d", nc, ctx->ssl_ctx, res, err)); nc->err = err; return MG_SSL_ERROR; } enum mg_ssl_if_result mg_ssl_if_handshake(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int server_side = (nc->listener != 
NULL); int res; /* If descriptor is not yet set, do it now. */ if (SSL_get_fd(ctx->ssl) < 0) { if (SSL_set_fd(ctx->ssl, nc->sock) != 1) return MG_SSL_ERROR; } res = server_side ? SSL_accept(ctx->ssl) : SSL_connect(ctx->ssl); if (res != 1) return mg_ssl_if_ssl_err(nc, res); return MG_SSL_OK; } int mg_ssl_if_read(struct mg_connection *nc, void *buf, size_t buf_size) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int n = SSL_read(ctx->ssl, buf, buf_size); DBG(("%p %d -> %d", nc, (int) buf_size, n)); if (n < 0) return mg_ssl_if_ssl_err(nc, n); if (n == 0) nc->flags |= MG_F_CLOSE_IMMEDIATELY; return n; } int mg_ssl_if_write(struct mg_connection *nc, const void *data, size_t len) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int n = SSL_write(ctx->ssl, data, len); DBG(("%p %d -> %d", nc, (int) len, n)); if (n <= 0) return mg_ssl_if_ssl_err(nc, n); return n; } void mg_ssl_if_conn_close_notify(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; if (ctx == NULL) return; SSL_shutdown(ctx->ssl); } void mg_ssl_if_conn_free(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; if (ctx == NULL) return; nc->ssl_if_data = NULL; if (ctx->ssl != NULL) SSL_free(ctx->ssl); if (ctx->ssl_ctx != NULL && nc->listener == NULL) SSL_CTX_free(ctx->ssl_ctx); mbuf_free(&ctx->psk); memset(ctx, 0, sizeof(*ctx)); MG_FREE(ctx); } /* * Cipher suite options used for TLS negotiation. 
* https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations */ static const char mg_s_cipher_list[] = #if defined(MG_SSL_CRYPTO_MODERN) "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:" "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:" "DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:" "ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:" "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:" "ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:" "DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:" "DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:" "!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK" #elif defined(MG_SSL_CRYPTO_OLD) "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:" "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:" "DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:" "ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:" "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:" "ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:" "DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:" "DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:" "ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:" "AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:" "HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:" "!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA" #else /* Default - intermediate. 
*/ "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:" "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:" "DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:" "ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:" "ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:" "ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:" "DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:" "DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:" "AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:" "DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:" "!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA" #endif ; /* * Default DH params for PFS cipher negotiation. This is a 2048-bit group. * Will be used if none are provided by the user in the certificate file. */ #if !MG_DISABLE_PFS && !defined(KR_VERSION) static const char mg_s_default_dh_params[] = "\ -----BEGIN DH PARAMETERS-----\n\ MIIBCAKCAQEAlvbgD/qh9znWIlGFcV0zdltD7rq8FeShIqIhkQ0C7hYFThrBvF2E\n\ Z9bmgaP+sfQwGpVlv9mtaWjvERbu6mEG7JTkgmVUJrUt/wiRzwTaCXBqZkdUO8Tq\n\ +E6VOEQAilstG90ikN1Tfo+K6+X68XkRUIlgawBTKuvKVwBhuvlqTGerOtnXWnrt\n\ ym//hd3cd5PBYGBix0i7oR4xdghvfR2WLVu0LgdThTBb6XP7gLd19cQ1JuBtAajZ\n\ wMuPn7qlUkEFDIkAZy59/Hue/H2Q2vU/JsvVhHWCQBL4F1ofEAt50il6ZxR1QfFK\n\ 9VGKDC4oOgm9DlxwwBoC2FjqmvQlqVV3kwIBAg==\n\ -----END DH PARAMETERS-----\n"; #endif static enum mg_ssl_if_result mg_use_ca_cert(SSL_CTX *ctx, const char *cert) { if (cert == NULL || strcmp(cert, "*") == 0) { return MG_SSL_OK; } SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, 0); return SSL_CTX_load_verify_locations(ctx, cert, NULL) == 1 ? 
/*
 * Load a PEM certificate and private key into the SSL context, and set up
 * DH parameters so PFS cipher suites can be negotiated.
 *
 * cert/key may be NULL or empty, in which case nothing is loaded and
 * MG_SSL_OK is returned; if key is NULL, the cert file is assumed to
 * contain the key as well. On failure, *err_msg (if non-NULL) is pointed
 * at a static description and MG_SSL_ERROR is returned.
 */
static enum mg_ssl_if_result mg_use_cert(SSL_CTX *ctx, const char *cert,
                                         const char *key,
                                         const char **err_msg) {
  if (key == NULL) key = cert;
  if (cert == NULL || cert[0] == '\0' || key == NULL || key[0] == '\0') {
    return MG_SSL_OK;
  } else if (SSL_CTX_use_certificate_file(ctx, cert, 1) == 0) {
    /* 1 == SSL_FILETYPE_PEM */
    MG_SET_PTRPTR(err_msg, "Invalid SSL cert");
    return MG_SSL_ERROR;
  } else if (SSL_CTX_use_PrivateKey_file(ctx, key, 1) == 0) {
    MG_SET_PTRPTR(err_msg, "Invalid SSL key");
    return MG_SSL_ERROR;
  } else if (SSL_CTX_use_certificate_chain_file(ctx, cert) == 0) {
    MG_SET_PTRPTR(err_msg, "Invalid CA bundle");
    return MG_SSL_ERROR;
  } else {
    /* Allow the caller's send buffer to move between partial writes. */
    SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
#if !MG_DISABLE_PFS && !defined(KR_VERSION)
    BIO *bio = NULL;
    DH *dh = NULL;
    /* Try to read DH parameters from the cert/key file. */
    bio = BIO_new_file(cert, "r");
    if (bio != NULL) {
      dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
      BIO_free(bio);
    }
    /*
     * If there are no DH params in the file, fall back to hard-coded ones.
     * Not ideal, but better than nothing.
     */
    if (dh == NULL) {
      bio = BIO_new_mem_buf((void *) mg_s_default_dh_params, -1);
      dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
      BIO_free(bio);
    }
    if (dh != NULL) {
      SSL_CTX_set_tmp_dh(ctx, dh);
      SSL_CTX_set_options(ctx, SSL_OP_SINGLE_DH_USE);
      DH_free(dh);
    }
#if OPENSSL_VERSION_NUMBER > 0x10002000L
    /* Let OpenSSL pick an ECDH curve automatically (1.0.2+). */
    SSL_CTX_set_ecdh_auto(ctx, 1);
#endif
#endif
  }
  return MG_SSL_OK;
}
/*
 * Configure TLS-PSK for a client connection: parse the hex-encoded
 * pre-shared key (32 or 64 hex chars, i.e. a 16- or 32-byte key), store
 * "identity\0key" in ctx->psk for later use by mg_ssl_if_ossl_psk_cb,
 * and register that callback on the SSL context.
 *
 * Both identity and key_str NULL: PSK not requested, returns MG_SSL_OK.
 * Exactly one NULL, a bad key length, or a non-hex character: MG_SSL_ERROR.
 */
static enum mg_ssl_if_result mg_ssl_if_ossl_set_psk(struct mg_ssl_if_ctx *ctx,
                                                    const char *identity,
                                                    const char *key_str) {
  unsigned char key[32];
  size_t key_len;
  size_t i = 0;
  if (identity == NULL && key_str == NULL) return MG_SSL_OK;
  if (identity == NULL || key_str == NULL) return MG_SSL_ERROR;
  key_len = strlen(key_str);
  if (key_len != 32 && key_len != 64) return MG_SSL_ERROR;
  memset(key, 0, sizeof(key));
  key_len = 0;
  /* Decode hex: every two input chars form one key byte. */
  for (i = 0; key_str[i] != '\0'; i++) {
    unsigned char c;
    char hc = tolower((int) key_str[i]);
    if (hc >= '0' && hc <= '9') {
      c = hc - '0';
    } else if (hc >= 'a' && hc <= 'f') {
      c = hc - 'a' + 0xa;
    } else {
      return MG_SSL_ERROR;
    }
    key_len = i / 2;
    key[key_len] <<= 4;
    key[key_len] |= c;
  }
  /* key_len was the index of the last byte written; turn it into a count. */
  key_len++;
  DBG(("identity = '%s', key = (%u)", identity, (unsigned int) key_len));
  ctx->identity_len = strlen(identity);
  /* Layout in ctx->psk: identity (with its NUL), then the raw key bytes. */
  mbuf_append(&ctx->psk, identity, ctx->identity_len + 1);
  mbuf_append(&ctx->psk, key, key_len);
  SSL_CTX_set_psk_client_callback(ctx->ssl_ctx, mg_ssl_if_ossl_psk_cb);
  /* Hack: there is no field for us to keep this, so we use msg_callback_arg */
  ctx->ssl_ctx->msg_callback_arg = ctx;
  return MG_SSL_OK;
}
*/ return MG_SSL_ERROR; } #endif /* defined(KR_VERSION) */ const char *mg_set_ssl(struct mg_connection *nc, const char *cert, const char *ca_cert) { const char *err_msg = NULL; struct mg_ssl_if_conn_params params; memset(&params, 0, sizeof(params)); params.cert = cert; params.ca_cert = ca_cert; if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) { return err_msg; } return NULL; } #endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_OPENSSL */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/ssl_if_mbedtls.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_MBEDTLS #include <mbedtls/debug.h> #include <mbedtls/ecp.h> #include <mbedtls/platform.h> #include <mbedtls/ssl.h> #include <mbedtls/x509_crt.h> static void mg_ssl_mbed_log(void *ctx, int level, const char *file, int line, const char *str) { enum cs_log_level cs_level; switch (level) { case 1: cs_level = LL_ERROR; break; case 2: case 3: cs_level = LL_DEBUG; break; default: cs_level = LL_VERBOSE_DEBUG; } /* mbedTLS passes strings with \n at the end, strip it. */ LOG(cs_level, ("%p %.*s", ctx, (int) (strlen(str) - 1), str)); (void) file; (void) line; } struct mg_ssl_if_ctx { mbedtls_ssl_config *conf; mbedtls_ssl_context *ssl; mbedtls_x509_crt *cert; mbedtls_pk_context *key; mbedtls_x509_crt *ca_cert; struct mbuf cipher_suites; }; /* Must be provided by the platform. ctx is struct mg_connection. 
*/ extern int mg_ssl_if_mbed_random(void *ctx, unsigned char *buf, size_t len); void mg_ssl_if_init() { } enum mg_ssl_if_result mg_ssl_if_conn_accept(struct mg_connection *nc, struct mg_connection *lc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx)); struct mg_ssl_if_ctx *lc_ctx = (struct mg_ssl_if_ctx *) lc->ssl_if_data; nc->ssl_if_data = ctx; if (ctx == NULL || lc_ctx == NULL) return MG_SSL_ERROR; ctx->ssl = (mbedtls_ssl_context *) MG_CALLOC(1, sizeof(*ctx->ssl)); if (mbedtls_ssl_setup(ctx->ssl, lc_ctx->conf) != 0) { return MG_SSL_ERROR; } return MG_SSL_OK; } static enum mg_ssl_if_result mg_use_cert(struct mg_ssl_if_ctx *ctx, const char *cert, const char *key, const char **err_msg); static enum mg_ssl_if_result mg_use_ca_cert(struct mg_ssl_if_ctx *ctx, const char *cert); static enum mg_ssl_if_result mg_set_cipher_list(struct mg_ssl_if_ctx *ctx, const char *ciphers); static enum mg_ssl_if_result mg_ssl_if_mbed_set_psk(struct mg_ssl_if_ctx *ctx, const char *identity, const char *key); enum mg_ssl_if_result mg_ssl_if_conn_init( struct mg_connection *nc, const struct mg_ssl_if_conn_params *params, const char **err_msg) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx)); DBG(("%p %s,%s,%s", nc, (params->cert ? params->cert : ""), (params->key ? params->key : ""), (params->ca_cert ? params->ca_cert : ""))); if (ctx == NULL) { MG_SET_PTRPTR(err_msg, "Out of memory"); return MG_SSL_ERROR; } nc->ssl_if_data = ctx; ctx->conf = (mbedtls_ssl_config *) MG_CALLOC(1, sizeof(*ctx->conf)); mbuf_init(&ctx->cipher_suites, 0); mbedtls_ssl_config_init(ctx->conf); mbedtls_ssl_conf_dbg(ctx->conf, mg_ssl_mbed_log, nc); if (mbedtls_ssl_config_defaults( ctx->conf, (nc->flags & MG_F_LISTENING ? 
MBEDTLS_SSL_IS_SERVER : MBEDTLS_SSL_IS_CLIENT), MBEDTLS_SSL_TRANSPORT_STREAM, MBEDTLS_SSL_PRESET_DEFAULT) != 0) { MG_SET_PTRPTR(err_msg, "Failed to init SSL config"); return MG_SSL_ERROR; } /* TLS 1.2 and up */ mbedtls_ssl_conf_min_version(ctx->conf, MBEDTLS_SSL_MAJOR_VERSION_3, MBEDTLS_SSL_MINOR_VERSION_3); mbedtls_ssl_conf_rng(ctx->conf, mg_ssl_if_mbed_random, nc); if (params->cert != NULL && mg_use_cert(ctx, params->cert, params->key, err_msg) != MG_SSL_OK) { return MG_SSL_ERROR; } if (params->ca_cert != NULL && mg_use_ca_cert(ctx, params->ca_cert) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid SSL CA cert"); return MG_SSL_ERROR; } if (mg_set_cipher_list(ctx, params->cipher_suites) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid cipher suite list"); return MG_SSL_ERROR; } if (mg_ssl_if_mbed_set_psk(ctx, params->psk_identity, params->psk_key) != MG_SSL_OK) { MG_SET_PTRPTR(err_msg, "Invalid PSK settings"); return MG_SSL_ERROR; } if (!(nc->flags & MG_F_LISTENING)) { ctx->ssl = (mbedtls_ssl_context *) MG_CALLOC(1, sizeof(*ctx->ssl)); mbedtls_ssl_init(ctx->ssl); if (mbedtls_ssl_setup(ctx->ssl, ctx->conf) != 0) { MG_SET_PTRPTR(err_msg, "Failed to create SSL session"); return MG_SSL_ERROR; } if (params->server_name != NULL && mbedtls_ssl_set_hostname(ctx->ssl, params->server_name) != 0) { return MG_SSL_ERROR; } } #ifdef MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN if (mbedtls_ssl_conf_max_frag_len(ctx->conf, #if MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 512 MBEDTLS_SSL_MAX_FRAG_LEN_512 #elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 1024 MBEDTLS_SSL_MAX_FRAG_LEN_1024 #elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 2048 MBEDTLS_SSL_MAX_FRAG_LEN_2048 #elif MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN == 4096 MBEDTLS_SSL_MAX_FRAG_LEN_4096 #else #error Invalid MG_SSL_IF_MBEDTLS_MAX_FRAG_LEN #endif ) != 0) { return MG_SSL_ERROR; } #endif nc->flags |= MG_F_SSL; return MG_SSL_OK; } #if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len); int ssl_socket_recv(void *ctx, 
unsigned char *buf, size_t len); #else static int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len) { struct mg_connection *nc = (struct mg_connection *) ctx; int n = (int) MG_SEND_FUNC(nc->sock, buf, len, 0); LOG(LL_DEBUG, ("%p %d -> %d", nc, (int) len, n)); if (n >= 0) return n; n = mg_get_errno(); return ((n == EAGAIN || n == EINPROGRESS) ? MBEDTLS_ERR_SSL_WANT_WRITE : -1); } static int ssl_socket_recv(void *ctx, unsigned char *buf, size_t len) { struct mg_connection *nc = (struct mg_connection *) ctx; int n = (int) MG_RECV_FUNC(nc->sock, buf, len, 0); LOG(LL_DEBUG, ("%p %d <- %d", nc, (int) len, n)); if (n >= 0) return n; n = mg_get_errno(); return ((n == EAGAIN || n == EINPROGRESS) ? MBEDTLS_ERR_SSL_WANT_READ : -1); } #endif static enum mg_ssl_if_result mg_ssl_if_mbed_err(struct mg_connection *nc, int ret) { if (ret == MBEDTLS_ERR_SSL_WANT_READ) return MG_SSL_WANT_READ; if (ret == MBEDTLS_ERR_SSL_WANT_WRITE) return MG_SSL_WANT_WRITE; if (ret != MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY) { /* CLOSE_NOTIFY = Normal shutdown */ LOG(LL_ERROR, ("%p SSL error: %d", nc, ret)); } nc->err = ret; nc->flags |= MG_F_CLOSE_IMMEDIATELY; return MG_SSL_ERROR; } static void mg_ssl_if_mbed_free_certs_and_keys(struct mg_ssl_if_ctx *ctx) { if (ctx->cert != NULL) { mbedtls_x509_crt_free(ctx->cert); MG_FREE(ctx->cert); ctx->cert = NULL; mbedtls_pk_free(ctx->key); MG_FREE(ctx->key); ctx->key = NULL; } if (ctx->ca_cert != NULL) { mbedtls_ssl_conf_ca_chain(ctx->conf, NULL, NULL); mbedtls_x509_crt_free(ctx->ca_cert); MG_FREE(ctx->ca_cert); ctx->ca_cert = NULL; } } enum mg_ssl_if_result mg_ssl_if_handshake(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int err; /* If bio is not yet set, do it now. 
*/ if (ctx->ssl->p_bio == NULL) { mbedtls_ssl_set_bio(ctx->ssl, nc, ssl_socket_send, ssl_socket_recv, NULL); } err = mbedtls_ssl_handshake(ctx->ssl); if (err != 0) return mg_ssl_if_mbed_err(nc, err); #ifdef MG_SSL_IF_MBEDTLS_FREE_CERTS /* * Free the peer certificate, we don't need it after handshake. * Note that this effectively disables renegotiation. */ mbedtls_x509_crt_free(ctx->ssl->session->peer_cert); mbedtls_free(ctx->ssl->session->peer_cert); ctx->ssl->session->peer_cert = NULL; /* On a client connection we can also free our own and CA certs. */ if (nc->listener == NULL) { if (ctx->conf->key_cert != NULL) { /* Note that this assumes one key_cert entry, which matches our init. */ MG_FREE(ctx->conf->key_cert); ctx->conf->key_cert = NULL; } mbedtls_ssl_conf_ca_chain(ctx->conf, NULL, NULL); mg_ssl_if_mbed_free_certs_and_keys(ctx); } #endif return MG_SSL_OK; } int mg_ssl_if_read(struct mg_connection *nc, void *buf, size_t buf_size) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int n = mbedtls_ssl_read(ctx->ssl, (unsigned char *) buf, buf_size); DBG(("%p %d -> %d", nc, (int) buf_size, n)); if (n < 0) return mg_ssl_if_mbed_err(nc, n); if (n == 0) nc->flags |= MG_F_CLOSE_IMMEDIATELY; return n; } int mg_ssl_if_write(struct mg_connection *nc, const void *data, size_t len) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; int n = mbedtls_ssl_write(ctx->ssl, (const unsigned char *) data, len); DBG(("%p %d -> %d", nc, (int) len, n)); if (n < 0) return mg_ssl_if_mbed_err(nc, n); return n; } void mg_ssl_if_conn_close_notify(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; if (ctx == NULL) return; mbedtls_ssl_close_notify(ctx->ssl); } void mg_ssl_if_conn_free(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; if (ctx == NULL) return; nc->ssl_if_data = NULL; if (ctx->ssl != NULL) { mbedtls_ssl_free(ctx->ssl); MG_FREE(ctx->ssl); } 
mg_ssl_if_mbed_free_certs_and_keys(ctx); if (ctx->conf != NULL) { mbedtls_ssl_config_free(ctx->conf); MG_FREE(ctx->conf); } mbuf_free(&ctx->cipher_suites); memset(ctx, 0, sizeof(*ctx)); MG_FREE(ctx); } static enum mg_ssl_if_result mg_use_ca_cert(struct mg_ssl_if_ctx *ctx, const char *ca_cert) { if (ca_cert == NULL || strcmp(ca_cert, "*") == 0) { return MG_SSL_OK; } ctx->ca_cert = (mbedtls_x509_crt *) MG_CALLOC(1, sizeof(*ctx->ca_cert)); mbedtls_x509_crt_init(ctx->ca_cert); if (mbedtls_x509_crt_parse_file(ctx->ca_cert, ca_cert) != 0) { return MG_SSL_ERROR; } mbedtls_ssl_conf_ca_chain(ctx->conf, ctx->ca_cert, NULL); mbedtls_ssl_conf_authmode(ctx->conf, MBEDTLS_SSL_VERIFY_REQUIRED); return MG_SSL_OK; } static enum mg_ssl_if_result mg_use_cert(struct mg_ssl_if_ctx *ctx, const char *cert, const char *key, const char **err_msg) { if (key == NULL) key = cert; if (cert == NULL || cert[0] == '\0' || key == NULL || key[0] == '\0') { return MG_SSL_OK; } ctx->cert = (mbedtls_x509_crt *) MG_CALLOC(1, sizeof(*ctx->cert)); mbedtls_x509_crt_init(ctx->cert); ctx->key = (mbedtls_pk_context *) MG_CALLOC(1, sizeof(*ctx->key)); mbedtls_pk_init(ctx->key); if (mbedtls_x509_crt_parse_file(ctx->cert, cert) != 0) { MG_SET_PTRPTR(err_msg, "Invalid SSL cert"); return MG_SSL_ERROR; } if (mbedtls_pk_parse_keyfile(ctx->key, key, NULL) != 0) { MG_SET_PTRPTR(err_msg, "Invalid SSL key"); return MG_SSL_ERROR; } if (mbedtls_ssl_conf_own_cert(ctx->conf, ctx->cert, ctx->key) != 0) { MG_SET_PTRPTR(err_msg, "Invalid SSL key or cert"); return MG_SSL_ERROR; } return MG_SSL_OK; } static const int mg_s_cipher_list[] = { MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, MBEDTLS_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, MBEDTLS_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, MBEDTLS_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, MBEDTLS_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, 
MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, MBEDTLS_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, MBEDTLS_TLS_RSA_WITH_AES_128_GCM_SHA256, MBEDTLS_TLS_RSA_WITH_AES_128_CBC_SHA256, MBEDTLS_TLS_RSA_WITH_AES_128_CBC_SHA, 0}; /* * Ciphers can be specified as a colon-separated list of cipher suite names. * These can be found in * https://github.com/ARMmbed/mbedtls/blob/development/library/ssl_ciphersuites.c#L267 * E.g.: TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256:TLS-DHE-RSA-WITH-AES-256-CCM */ static enum mg_ssl_if_result mg_set_cipher_list(struct mg_ssl_if_ctx *ctx, const char *ciphers) { if (ciphers != NULL) { int l, id; const char *s = ciphers, *e; char tmp[50]; while (s != NULL) { e = strchr(s, ':'); l = (e != NULL ? (e - s) : (int) strlen(s)); strncpy(tmp, s, l); tmp[l] = '\0'; id = mbedtls_ssl_get_ciphersuite_id(tmp); DBG(("%s -> %04x", tmp, id)); if (id != 0) { mbuf_append(&ctx->cipher_suites, &id, sizeof(id)); } s = (e != NULL ? e + 1 : NULL); } if (ctx->cipher_suites.len == 0) return MG_SSL_ERROR; id = 0; mbuf_append(&ctx->cipher_suites, &id, sizeof(id)); mbuf_trim(&ctx->cipher_suites); mbedtls_ssl_conf_ciphersuites(ctx->conf, (const int *) ctx->cipher_suites.buf); } else { mbedtls_ssl_conf_ciphersuites(ctx->conf, mg_s_cipher_list); } return MG_SSL_OK; } static enum mg_ssl_if_result mg_ssl_if_mbed_set_psk(struct mg_ssl_if_ctx *ctx, const char *identity, const char *key_str) { unsigned char key[32]; size_t key_len; if (identity == NULL && key_str == NULL) return MG_SSL_OK; if (identity == NULL || key_str == NULL) return MG_SSL_ERROR; key_len = strlen(key_str); if (key_len != 32 && key_len != 64) return MG_SSL_ERROR; size_t i = 0; memset(key, 0, sizeof(key)); key_len = 0; for (i = 0; key_str[i] != '\0'; i++) { unsigned char c; char hc = tolower((int) key_str[i]); if (hc >= '0' && hc <= '9') { c = hc - '0'; } else if (hc >= 'a' && hc <= 'f') { c = hc - 'a' + 0xa; } else { return MG_SSL_ERROR; } key_len = i / 2; key[key_len] <<= 4; key[key_len] |= c; } key_len++; 
DBG(("identity = '%s', key = (%u)", identity, (unsigned int) key_len)); /* mbedTLS makes copies of psk and identity. */ if (mbedtls_ssl_conf_psk(ctx->conf, (const unsigned char *) key, key_len, (const unsigned char *) identity, strlen(identity)) != 0) { return MG_SSL_ERROR; } return MG_SSL_OK; } const char *mg_set_ssl(struct mg_connection *nc, const char *cert, const char *ca_cert) { const char *err_msg = NULL; struct mg_ssl_if_conn_params params; memset(&params, 0, sizeof(params)); params.cert = cert; params.ca_cert = ca_cert; if (mg_ssl_if_conn_init(nc, &params, &err_msg) != MG_SSL_OK) { return err_msg; } return NULL; } /* Lazy RNG. Warning: it would be a bad idea to do this in production! */ #ifdef MG_SSL_MBED_DUMMY_RANDOM int mg_ssl_if_mbed_random(void *ctx, unsigned char *buf, size_t len) { (void) ctx; while (len--) *buf++ = rand(); return 0; } #endif #endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_MBEDTLS */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/uri.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/uri.h" */ /* * scan string until `sep`, keeping track of component boundaries in `res`. * * `p` will point to the char after the separator or it will be `end`. 
 */
static void parse_uri_component(const char **p, const char *end, char sep,
                                struct mg_str *res) {
  res->p = *p;
  for (; *p < end; (*p)++) {
    if (**p == sep) {
      break;
    }
  }
  res->len = (*p) - res->p;
  /* Consume the separator itself so the caller resumes after it. */
  if (*p < end) (*p)++;
}

/*
 * Split `uri` into scheme, user-info, host, port, path, query and fragment.
 * Any output pointer may be NULL to skip that component. Components absent
 * from the input are returned as zero-length strings (port as 0).
 * Always returns 0.
 * NOTE(review): the P_PORT state accumulates any character via `*p - '0'`
 * without validating digits — non-numeric ports yield garbage; confirm
 * callers validate. Result strings alias `uri` — no copies are made.
 */
int mg_parse_uri(struct mg_str uri, struct mg_str *scheme,
                 struct mg_str *user_info, struct mg_str *host,
                 unsigned int *port, struct mg_str *path, struct mg_str *query,
                 struct mg_str *fragment) {
  struct mg_str rscheme = {0, 0}, ruser_info = {0, 0}, rhost = {0, 0},
                rpath = {0, 0}, rquery = {0, 0}, rfragment = {0, 0};
  unsigned int rport = 0;
  enum {
    P_START,
    P_SCHEME_OR_PORT,
    P_USER_INFO,
    P_HOST,
    P_PORT,
    P_REST
  } state = P_START;

  const char *p = uri.p, *end = p + uri.len;
  while (p < end) {
    switch (state) {
      case P_START:
        /*
         * expecting on of:
         * - `scheme://xxxx`
         * - `xxxx:port`
         * - `xxxx/path`
         */
        for (; p < end; p++) {
          if (*p == ':') {
            state = P_SCHEME_OR_PORT;
            break;
          } else if (*p == '/') {
            state = P_REST;
            break;
          }
        }
        if (state == P_START || state == P_REST) {
          rhost.p = uri.p;
          rhost.len = p - uri.p;
        }
        break;
      case P_SCHEME_OR_PORT:
        if (end - p >= 3 && strncmp(p, "://", 3) == 0) {
          rscheme.p = uri.p;
          rscheme.len = p - uri.p;
          state = P_USER_INFO;
          p += 2; /* point to last separator char */
        } else {
          rhost.p = uri.p;
          rhost.len = p - uri.p;
          state = P_PORT;
        }
        break;
      case P_USER_INFO:
        p++;
        ruser_info.p = p;
        for (; p < end; p++) {
          if (*p == '@') {
            state = P_HOST;
            break;
          } else if (*p == '/') {
            break;
          }
        }
        if (p == end || *p == '/') {
          /* backtrack and parse as host */
          state = P_HOST;
          p = ruser_info.p;
        }
        ruser_info.len = p - ruser_info.p;
        break;
      case P_HOST:
        if (*p == '@') p++;
        rhost.p = p;
        for (; p < end; p++) {
          if (*p == ':') {
            state = P_PORT;
            break;
          } else if (*p == '/') {
            state = P_REST;
            break;
          }
        }
        rhost.len = p - rhost.p;
        break;
      case P_PORT:
        p++;
        for (; p < end; p++) {
          if (*p == '/') {
            state = P_REST;
            break;
          }
          rport *= 10;
          rport += *p - '0';
        }
        break;
      case P_REST:
        /* `p` points to separator. `path` includes the separator */
        parse_uri_component(&p, end, '?', &rpath);
        parse_uri_component(&p, end, '#', &rquery);
        parse_uri_component(&p, end, '\0', &rfragment);
        break;
    }
  }

  if (scheme != 0) *scheme = rscheme;
  if (user_info != 0) *user_info = ruser_info;
  if (host != 0) *host = rhost;
  if (port != 0) *port = rport;
  if (path != 0) *path = rpath;
  if (query != 0) *query = rquery;
  if (fragment != 0) *fragment = rfragment;

  return 0;
}

/* Normalize the URI path. Remove/resolve "." and "..". */
/*
 * In-place-capable: writes the normalized path to out->p (caller-provided
 * buffer, may alias in->p since compaction only moves bytes left).
 * Returns 1 on success, 0 if the input does not start with '/'.
 */
int mg_normalize_uri_path(const struct mg_str *in, struct mg_str *out) {
  const char *s = in->p, *se = s + in->len;
  char *cp = (char *) out->p, *d;

  if (in->len == 0 || *s != '/') {
    out->len = 0;
    return 0;
  }

  d = cp;

  while (s < se) {
    const char *next = s;
    struct mg_str component;
    parse_uri_component(&next, se, '/', &component);
    if (mg_vcmp(&component, ".") == 0) {
      /* Yum. */
    } else if (mg_vcmp(&component, "..") == 0) {
      /* Backtrack to previous slash. */
      if (d > cp + 1 && *(d - 1) == '/') d--;
      while (d > cp && *(d - 1) != '/') d--;
    } else {
      memmove(d, s, next - s);
      d += next - s;
    }
    s = next;
  }
  if (d == cp) *d++ = '/';

  out->p = cp;
  out->len = d - cp;
  return 1;
}
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http.c"
#endif
/*
 * Copyright (c) 2014 Cesanta Software Limited
 * All rights reserved
 */

#if MG_ENABLE_HTTP

/* Amalgamated: #include "common/md5.h" */
/* Amalgamated: #include "common/sha1.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */

static const char *mg_version_header = "Mongoose/" MG_VERSION;

enum mg_http_proto_data_type { DATA_NONE, DATA_FILE, DATA_PUT };

struct mg_http_proto_data_file {
  FILE *fp;      /* Opened file. */
  int64_t cl;    /* Content-Length. How many bytes to send. */
  int64_t sent;  /* How many bytes have been already sent. */
  int keepalive; /* Keep connection open after sending.
*/ enum mg_http_proto_data_type type; }; #if MG_ENABLE_HTTP_CGI struct mg_http_proto_data_cgi { struct mg_connection *cgi_nc; }; #endif struct mg_http_proto_data_chuncked { int64_t body_len; /* How many bytes of chunked body was reassembled. */ }; struct mg_http_endpoint { struct mg_http_endpoint *next; const char *name; size_t name_len; mg_event_handler_t handler; #if MG_ENABLE_CALLBACK_USERDATA void *user_data; #endif }; enum mg_http_multipart_stream_state { MPS_BEGIN, MPS_WAITING_FOR_BOUNDARY, MPS_WAITING_FOR_CHUNK, MPS_GOT_CHUNK, MPS_GOT_BOUNDARY, MPS_FINALIZE, MPS_FINISHED }; struct mg_http_multipart_stream { const char *boundary; int boundary_len; const char *var_name; const char *file_name; void *user_data; int prev_io_len; enum mg_http_multipart_stream_state state; int processing_part; }; struct mg_reverse_proxy_data { struct mg_connection *linked_conn; }; struct mg_http_proto_data { #if MG_ENABLE_FILESYSTEM struct mg_http_proto_data_file file; #endif #if MG_ENABLE_HTTP_CGI struct mg_http_proto_data_cgi cgi; #endif #if MG_ENABLE_HTTP_STREAMING_MULTIPART struct mg_http_multipart_stream mp_stream; #endif struct mg_http_proto_data_chuncked chunk; struct mg_http_endpoint *endpoints; mg_event_handler_t endpoint_handler; struct mg_reverse_proxy_data reverse_proxy_data; }; static void mg_http_conn_destructor(void *proto_data); struct mg_connection *mg_connect_http_base( struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data), struct mg_connect_opts opts, const char *schema, const char *schema_ssl, const char *url, const char **path, char **user, char **pass, char **addr); static struct mg_http_proto_data *mg_http_get_proto_data( struct mg_connection *c) { if (c->proto_data == NULL) { c->proto_data = MG_CALLOC(1, sizeof(struct mg_http_proto_data)); c->proto_data_destructor = mg_http_conn_destructor; } return (struct mg_http_proto_data *) c->proto_data; } #if MG_ENABLE_HTTP_STREAMING_MULTIPART static void mg_http_free_proto_data_mp_stream( struct 
mg_http_multipart_stream *mp) { MG_FREE((void *) mp->boundary); MG_FREE((void *) mp->var_name); MG_FREE((void *) mp->file_name); memset(mp, 0, sizeof(*mp)); } #endif #if MG_ENABLE_FILESYSTEM static void mg_http_free_proto_data_file(struct mg_http_proto_data_file *d) { if (d != NULL) { if (d->fp != NULL) { fclose(d->fp); } memset(d, 0, sizeof(struct mg_http_proto_data_file)); } } #endif static void mg_http_free_proto_data_endpoints(struct mg_http_endpoint **ep) { struct mg_http_endpoint *current = *ep; while (current != NULL) { struct mg_http_endpoint *tmp = current->next; MG_FREE((void *) current->name); MG_FREE(current); current = tmp; } ep = NULL; } static void mg_http_free_reverse_proxy_data(struct mg_reverse_proxy_data *rpd) { if (rpd->linked_conn != NULL) { /* * Connection has linked one, we have to unlink & close it * since _this_ connection is going to die and * it doesn't make sense to keep another one */ struct mg_http_proto_data *pd = mg_http_get_proto_data(rpd->linked_conn); if (pd->reverse_proxy_data.linked_conn != NULL) { pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE; pd->reverse_proxy_data.linked_conn = NULL; } rpd->linked_conn = NULL; } } static void mg_http_conn_destructor(void *proto_data) { struct mg_http_proto_data *pd = (struct mg_http_proto_data *) proto_data; #if MG_ENABLE_FILESYSTEM mg_http_free_proto_data_file(&pd->file); #endif #if MG_ENABLE_HTTP_CGI mg_http_free_proto_data_cgi(&pd->cgi); #endif #if MG_ENABLE_HTTP_STREAMING_MULTIPART mg_http_free_proto_data_mp_stream(&pd->mp_stream); #endif mg_http_free_proto_data_endpoints(&pd->endpoints); mg_http_free_reverse_proxy_data(&pd->reverse_proxy_data); MG_FREE(proto_data); } #if MG_ENABLE_FILESYSTEM #define MIME_ENTRY(_ext, _type) \ { _ext, sizeof(_ext) - 1, _type } static const struct { const char *extension; size_t ext_len; const char *mime_type; } mg_static_builtin_mime_types[] = { MIME_ENTRY("html", "text/html"), MIME_ENTRY("html", "text/html"), MIME_ENTRY("htm", 
"text/html"), MIME_ENTRY("shtm", "text/html"), MIME_ENTRY("shtml", "text/html"), MIME_ENTRY("css", "text/css"), MIME_ENTRY("js", "application/x-javascript"), MIME_ENTRY("ico", "image/x-icon"), MIME_ENTRY("gif", "image/gif"), MIME_ENTRY("jpg", "image/jpeg"), MIME_ENTRY("jpeg", "image/jpeg"), MIME_ENTRY("png", "image/png"), MIME_ENTRY("svg", "image/svg+xml"), MIME_ENTRY("txt", "text/plain"), MIME_ENTRY("torrent", "application/x-bittorrent"), MIME_ENTRY("wav", "audio/x-wav"), MIME_ENTRY("mp3", "audio/x-mp3"), MIME_ENTRY("mid", "audio/mid"), MIME_ENTRY("m3u", "audio/x-mpegurl"), MIME_ENTRY("ogg", "application/ogg"), MIME_ENTRY("ram", "audio/x-pn-realaudio"), MIME_ENTRY("xml", "text/xml"), MIME_ENTRY("ttf", "application/x-font-ttf"), MIME_ENTRY("json", "application/json"), MIME_ENTRY("xslt", "application/xml"), MIME_ENTRY("xsl", "application/xml"), MIME_ENTRY("ra", "audio/x-pn-realaudio"), MIME_ENTRY("doc", "application/msword"), MIME_ENTRY("exe", "application/octet-stream"), MIME_ENTRY("zip", "application/x-zip-compressed"), MIME_ENTRY("xls", "application/excel"), MIME_ENTRY("tgz", "application/x-tar-gz"), MIME_ENTRY("tar", "application/x-tar"), MIME_ENTRY("gz", "application/x-gunzip"), MIME_ENTRY("arj", "application/x-arj-compressed"), MIME_ENTRY("rar", "application/x-rar-compressed"), MIME_ENTRY("rtf", "application/rtf"), MIME_ENTRY("pdf", "application/pdf"), MIME_ENTRY("swf", "application/x-shockwave-flash"), MIME_ENTRY("mpg", "video/mpeg"), MIME_ENTRY("webm", "video/webm"), MIME_ENTRY("mpeg", "video/mpeg"), MIME_ENTRY("mov", "video/quicktime"), MIME_ENTRY("mp4", "video/mp4"), MIME_ENTRY("m4v", "video/x-m4v"), MIME_ENTRY("asf", "video/x-ms-asf"), MIME_ENTRY("avi", "video/x-msvideo"), MIME_ENTRY("bmp", "image/bmp"), {NULL, 0, NULL}}; static struct mg_str mg_get_mime_type(const char *path, const char *dflt, const struct mg_serve_http_opts *opts) { const char *ext, *overrides; size_t i, path_len; struct mg_str r, k, v; path_len = strlen(path); overrides = 
opts->custom_mime_types; while ((overrides = mg_next_comma_list_entry(overrides, &k, &v)) != NULL) { ext = path + (path_len - k.len); if (path_len > k.len && mg_vcasecmp(&k, ext) == 0) { return v; } } for (i = 0; mg_static_builtin_mime_types[i].extension != NULL; i++) { ext = path + (path_len - mg_static_builtin_mime_types[i].ext_len); if (path_len > mg_static_builtin_mime_types[i].ext_len && ext[-1] == '.' && mg_casecmp(ext, mg_static_builtin_mime_types[i].extension) == 0) { r.p = mg_static_builtin_mime_types[i].mime_type; r.len = strlen(r.p); return r; } } r.p = dflt; r.len = strlen(r.p); return r; } #endif /* * Check whether full request is buffered. Return: * -1 if request is malformed * 0 if request is not yet fully buffered * >0 actual request length, including last \r\n\r\n */ static int mg_http_get_request_len(const char *s, int buf_len) { const unsigned char *buf = (unsigned char *) s; int i; for (i = 0; i < buf_len; i++) { if (!isprint(buf[i]) && buf[i] != '\r' && buf[i] != '\n' && buf[i] < 128) { return -1; } else if (buf[i] == '\n' && i + 1 < buf_len && buf[i + 1] == '\n') { return i + 2; } else if (buf[i] == '\n' && i + 2 < buf_len && buf[i + 1] == '\r' && buf[i + 2] == '\n') { return i + 3; } } return 0; } static const char *mg_http_parse_headers(const char *s, const char *end, int len, struct http_message *req) { int i = 0; while (i < (int) ARRAY_SIZE(req->header_names) - 1) { struct mg_str *k = &req->header_names[i], *v = &req->header_values[i]; s = mg_skip(s, end, ": ", k); s = mg_skip(s, end, "\r\n", v); while (v->len > 0 && v->p[v->len - 1] == ' ') { v->len--; /* Trim trailing spaces in header value */ } /* * If header value is empty - skip it and go to next (if any). 
* NOTE: Do not add it to headers_values because such addition changes API * behaviour */ if (k->len != 0 && v->len == 0) { continue; } if (k->len == 0 || v->len == 0) { k->p = v->p = NULL; k->len = v->len = 0; break; } if (!mg_ncasecmp(k->p, "Content-Length", 14)) { req->body.len = (size_t) to64(v->p); req->message.len = len + req->body.len; } i++; } return s; } int mg_parse_http(const char *s, int n, struct http_message *hm, int is_req) { const char *end, *qs; int len = mg_http_get_request_len(s, n); if (len <= 0) return len; memset(hm, 0, sizeof(*hm)); hm->message.p = s; hm->body.p = s + len; hm->message.len = hm->body.len = (size_t) ~0; end = s + len; /* Request is fully buffered. Skip leading whitespaces. */ while (s < end && isspace(*(unsigned char *) s)) s++; if (is_req) { /* Parse request line: method, URI, proto */ s = mg_skip(s, end, " ", &hm->method); s = mg_skip(s, end, " ", &hm->uri); s = mg_skip(s, end, "\r\n", &hm->proto); if (hm->uri.p <= hm->method.p || hm->proto.p <= hm->uri.p) return -1; /* If URI contains '?' character, initialize query_string */ if ((qs = (char *) memchr(hm->uri.p, '?', hm->uri.len)) != NULL) { hm->query_string.p = qs + 1; hm->query_string.len = &hm->uri.p[hm->uri.len] - (qs + 1); hm->uri.len = qs - hm->uri.p; } } else { s = mg_skip(s, end, " ", &hm->proto); if (end - s < 4 || s[3] != ' ') return -1; hm->resp_code = atoi(s); if (hm->resp_code < 100 || hm->resp_code >= 600) return -1; s += 4; s = mg_skip(s, end, "\r\n", &hm->resp_status_msg); } s = mg_http_parse_headers(s, end, len, hm); /* * mg_parse_http() is used to parse both HTTP requests and HTTP * responses. If HTTP response does not have Content-Length set, then * body is read until socket is closed, i.e. body.len is infinite (~0). * * For HTTP requests though, according to * http://tools.ietf.org/html/rfc7231#section-8.1.3, * only POST and PUT methods have defined body semantics. 
* Therefore, if Content-Length is not specified and methods are * not one of PUT or POST, set body length to 0. * * So, * if it is HTTP request, and Content-Length is not set, * and method is not (PUT or POST) then reset body length to zero. */ if (hm->body.len == (size_t) ~0 && is_req && mg_vcasecmp(&hm->method, "PUT") != 0 && mg_vcasecmp(&hm->method, "POST") != 0) { hm->body.len = 0; hm->message.len = len; } return len; } struct mg_str *mg_get_http_header(struct http_message *hm, const char *name) { size_t i, len = strlen(name); for (i = 0; hm->header_names[i].len > 0; i++) { struct mg_str *h = &hm->header_names[i], *v = &hm->header_values[i]; if (h->p != NULL && h->len == len && !mg_ncasecmp(h->p, name, len)) return v; } return NULL; } #if MG_ENABLE_FILESYSTEM static void mg_http_transfer_file_data(struct mg_connection *nc) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); char buf[MG_MAX_HTTP_SEND_MBUF]; size_t n = 0, to_read = 0, left = (size_t)(pd->file.cl - pd->file.sent); if (pd->file.type == DATA_FILE) { struct mbuf *io = &nc->send_mbuf; if (io->len < sizeof(buf)) { to_read = sizeof(buf) - io->len; } if (left > 0 && to_read > left) { to_read = left; } if (to_read == 0) { /* Rate limiting. send_mbuf is too full, wait until it's drained. */ } else if (pd->file.sent < pd->file.cl && (n = mg_fread(buf, 1, to_read, pd->file.fp)) > 0) { mg_send(nc, buf, n); pd->file.sent += n; } else { if (!pd->file.keepalive) nc->flags |= MG_F_SEND_AND_CLOSE; mg_http_free_proto_data_file(&pd->file); } } else if (pd->file.type == DATA_PUT) { struct mbuf *io = &nc->recv_mbuf; size_t to_write = left <= 0 ? 0 : left < io->len ? 
(size_t) left : io->len; size_t n = mg_fwrite(io->buf, 1, to_write, pd->file.fp); if (n > 0) { mbuf_remove(io, n); pd->file.sent += n; } if (n == 0 || pd->file.sent >= pd->file.cl) { if (!pd->file.keepalive) nc->flags |= MG_F_SEND_AND_CLOSE; mg_http_free_proto_data_file(&pd->file); } } #if MG_ENABLE_HTTP_CGI else if (pd->cgi.cgi_nc != NULL) { /* This is POST data that needs to be forwarded to the CGI process */ if (pd->cgi.cgi_nc != NULL) { mg_forward(nc, pd->cgi.cgi_nc); } else { nc->flags |= MG_F_SEND_AND_CLOSE; } } #endif } #endif /* MG_ENABLE_FILESYSTEM */ /* * Parse chunked-encoded buffer. Return 0 if the buffer is not encoded, or * if it's incomplete. If the chunk is fully buffered, return total number of * bytes in a chunk, and store data in `data`, `data_len`. */ static size_t mg_http_parse_chunk(char *buf, size_t len, char **chunk_data, size_t *chunk_len) { unsigned char *s = (unsigned char *) buf; size_t n = 0; /* scanned chunk length */ size_t i = 0; /* index in s */ /* Scan chunk length. That should be a hexadecimal number. */ while (i < len && isxdigit(s[i])) { n *= 16; n += (s[i] >= '0' && s[i] <= '9') ? 
s[i] - '0' : tolower(s[i]) - 'a' + 10; i++; } /* Skip new line */ if (i == 0 || i + 2 > len || s[i] != '\r' || s[i + 1] != '\n') { return 0; } i += 2; /* Record where the data is */ *chunk_data = (char *) s + i; *chunk_len = n; /* Skip data */ i += n; /* Skip new line */ if (i == 0 || i + 2 > len || s[i] != '\r' || s[i + 1] != '\n') { return 0; } return i + 2; } MG_INTERNAL size_t mg_handle_chunked(struct mg_connection *nc, struct http_message *hm, char *buf, size_t blen) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); char *data; size_t i, n, data_len, body_len, zero_chunk_received = 0; /* Find out piece of received data that is not yet reassembled */ body_len = (size_t) pd->chunk.body_len; assert(blen >= body_len); /* Traverse all fully buffered chunks */ for (i = body_len; (n = mg_http_parse_chunk(buf + i, blen - i, &data, &data_len)) > 0; i += n) { /* Collapse chunk data to the rest of HTTP body */ memmove(buf + body_len, data, data_len); body_len += data_len; hm->body.len = body_len; if (data_len == 0) { zero_chunk_received = 1; i += n; break; } } if (i > body_len) { /* Shift unparsed content to the parsed body */ assert(i <= blen); memmove(buf + body_len, buf + i, blen - i); memset(buf + body_len + blen - i, 0, i - body_len); nc->recv_mbuf.len -= i - body_len; pd->chunk.body_len = body_len; /* Send MG_EV_HTTP_CHUNK event */ nc->flags &= ~MG_F_DELETE_CHUNK; mg_call(nc, nc->handler, nc->user_data, MG_EV_HTTP_CHUNK, hm); /* Delete processed data if user set MG_F_DELETE_CHUNK flag */ if (nc->flags & MG_F_DELETE_CHUNK) { memset(buf, 0, body_len); memmove(buf, buf + body_len, blen - i); nc->recv_mbuf.len -= body_len; hm->body.len = 0; pd->chunk.body_len = 0; } if (zero_chunk_received) { /* Total message size is len(body) + len(headers) */ hm->message.len = (size_t) pd->chunk.body_len + blen - i + (hm->body.p - hm->message.p); } } return body_len; } struct mg_http_endpoint *mg_http_get_endpoint_handler(struct mg_connection *nc, struct mg_str *uri_path) 
{ struct mg_http_proto_data *pd; struct mg_http_endpoint *ret = NULL; int matched, matched_max = 0; struct mg_http_endpoint *ep; if (nc == NULL) { return NULL; } pd = mg_http_get_proto_data(nc); ep = pd->endpoints; while (ep != NULL) { const struct mg_str name_s = {ep->name, ep->name_len}; if ((matched = mg_match_prefix_n(name_s, *uri_path)) != -1) { if (matched > matched_max) { /* Looking for the longest suitable handler */ ret = ep; matched_max = matched; } } ep = ep->next; } return ret; } static void mg_http_call_endpoint_handler(struct mg_connection *nc, int ev, struct http_message *hm) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); void *user_data = nc->user_data; if (ev == MG_EV_HTTP_REQUEST) { struct mg_http_endpoint *ep = mg_http_get_endpoint_handler(nc->listener, &hm->uri); if (ep != NULL) { pd->endpoint_handler = ep->handler; #if MG_ENABLE_CALLBACK_USERDATA user_data = ep->user_data; #endif } } mg_call(nc, pd->endpoint_handler ? pd->endpoint_handler : nc->handler, user_data, ev, hm); } #if MG_ENABLE_HTTP_STREAMING_MULTIPART static void mg_http_multipart_continue(struct mg_connection *nc); static void mg_http_multipart_begin(struct mg_connection *nc, struct http_message *hm, int req_len); #endif /* * lx106 compiler has a bug (TODO(mkm) report and insert tracking bug here) * If a big structure is declared in a big function, lx106 gcc will make it * even bigger (round up to 4k, from 700 bytes of actual size). 
*/ #ifdef __xtensa__ static void mg_http_handler2(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data), struct http_message *hm) __attribute__((noinline)); void mg_http_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { struct http_message hm; mg_http_handler2(nc, ev, ev_data MG_UD_ARG(user_data), &hm); } static void mg_http_handler2(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data), struct http_message *hm) { #else /* !__XTENSA__ */ void mg_http_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { struct http_message shm; struct http_message *hm = &shm; #endif /* __XTENSA__ */ struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); struct mbuf *io = &nc->recv_mbuf; int req_len; const int is_req = (nc->listener != NULL); #if MG_ENABLE_HTTP_WEBSOCKET struct mg_str *vec; #endif if (ev == MG_EV_CLOSE) { #if MG_ENABLE_HTTP_CGI /* Close associated CGI forwarder connection */ if (pd->cgi.cgi_nc != NULL) { pd->cgi.cgi_nc->user_data = NULL; pd->cgi.cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY; } #endif #if MG_ENABLE_HTTP_STREAMING_MULTIPART if (pd->mp_stream.boundary != NULL) { /* * Multipart message is in progress, but connection is closed. * Finish part and request with an error flag. */ struct mg_http_multipart_part mp; memset(&mp, 0, sizeof(mp)); mp.status = -1; mp.var_name = pd->mp_stream.var_name; mp.file_name = pd->mp_stream.file_name; mg_call(nc, (pd->endpoint_handler ? pd->endpoint_handler : nc->handler), nc->user_data, MG_EV_HTTP_PART_END, &mp); mp.var_name = NULL; mp.file_name = NULL; mg_call(nc, (pd->endpoint_handler ? pd->endpoint_handler : nc->handler), nc->user_data, MG_EV_HTTP_MULTIPART_REQUEST_END, &mp); } else #endif if (io->len > 0 && mg_parse_http(io->buf, io->len, hm, is_req) > 0) { /* * For HTTP messages without Content-Length, always send HTTP message * before MG_EV_CLOSE message. */ int ev2 = is_req ? 
MG_EV_HTTP_REQUEST : MG_EV_HTTP_REPLY; hm->message.len = io->len; hm->body.len = io->buf + io->len - hm->body.p; mg_http_call_endpoint_handler(nc, ev2, hm); } } #if MG_ENABLE_FILESYSTEM if (pd->file.fp != NULL) { mg_http_transfer_file_data(nc); } #endif mg_call(nc, nc->handler, nc->user_data, ev, ev_data); if (ev == MG_EV_RECV) { struct mg_str *s; #if MG_ENABLE_HTTP_STREAMING_MULTIPART if (pd->mp_stream.boundary != NULL) { mg_http_multipart_continue(nc); return; } #endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */ req_len = mg_parse_http(io->buf, io->len, hm, is_req); if (req_len > 0 && (s = mg_get_http_header(hm, "Transfer-Encoding")) != NULL && mg_vcasecmp(s, "chunked") == 0) { mg_handle_chunked(nc, hm, io->buf + req_len, io->len - req_len); } #if MG_ENABLE_HTTP_STREAMING_MULTIPART if (req_len > 0 && (s = mg_get_http_header(hm, "Content-Type")) != NULL && s->len >= 9 && strncmp(s->p, "multipart", 9) == 0) { mg_http_multipart_begin(nc, hm, req_len); mg_http_multipart_continue(nc); return; } #endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */ /* TODO(alashkin): refactor this ifelseifelseifelseifelse */ if ((req_len < 0 || (req_len == 0 && io->len >= MG_MAX_HTTP_REQUEST_SIZE))) { DBG(("invalid request")); nc->flags |= MG_F_CLOSE_IMMEDIATELY; } else if (req_len == 0) { /* Do nothing, request is not yet fully buffered */ } #if MG_ENABLE_HTTP_WEBSOCKET else if (nc->listener == NULL && mg_get_http_header(hm, "Sec-WebSocket-Accept")) { /* We're websocket client, got handshake response from server. */ /* TODO(lsm): check the validity of accept Sec-WebSocket-Accept */ mbuf_remove(io, req_len); nc->proto_handler = mg_ws_handler; nc->flags |= MG_F_IS_WEBSOCKET; mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_DONE, NULL); mg_ws_handler(nc, MG_EV_RECV, ev_data MG_UD_ARG(user_data)); } else if (nc->listener != NULL && (vec = mg_get_http_header(hm, "Sec-WebSocket-Key")) != NULL) { struct mg_http_endpoint *ep; /* This is a websocket request. Switch protocol handlers. 
*/ mbuf_remove(io, req_len); nc->proto_handler = mg_ws_handler; nc->flags |= MG_F_IS_WEBSOCKET; /* * If we have a handler set up with mg_register_http_endpoint(), * deliver subsequent websocket events to this handler after the * protocol switch. */ ep = mg_http_get_endpoint_handler(nc->listener, &hm->uri); if (ep != NULL) { nc->handler = ep->handler; #if MG_ENABLE_CALLBACK_USERDATA nc->user_data = ep->user_data; #endif } /* Send handshake */ mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_REQUEST, hm); if (!(nc->flags & (MG_F_CLOSE_IMMEDIATELY | MG_F_SEND_AND_CLOSE))) { if (nc->send_mbuf.len == 0) { mg_ws_handshake(nc, vec); } mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_HANDSHAKE_DONE, NULL); mg_ws_handler(nc, MG_EV_RECV, ev_data MG_UD_ARG(user_data)); } } #endif /* MG_ENABLE_HTTP_WEBSOCKET */ else if (hm->message.len <= io->len) { int trigger_ev = nc->listener ? MG_EV_HTTP_REQUEST : MG_EV_HTTP_REPLY; char addr[32]; mg_sock_addr_to_str(&nc->sa, addr, sizeof(addr), MG_SOCK_STRINGIFY_IP | MG_SOCK_STRINGIFY_PORT); DBG(("%p %s %.*s %.*s", nc, addr, (int) hm->method.len, hm->method.p, (int) hm->uri.len, hm->uri.p)); /* Whole HTTP message is fully buffered, call event handler */ #if MG_ENABLE_JAVASCRIPT v7_val_t v1, v2, headers, req, args, res; struct v7 *v7 = nc->mgr->v7; const char *ev_name = trigger_ev == MG_EV_HTTP_REPLY ? "onsnd" : "onrcv"; int i, js_callback_handled_request = 0; if (v7 != NULL) { /* Lookup JS callback */ v1 = v7_get(v7, v7_get_global(v7), "Http", ~0); v2 = v7_get(v7, v1, ev_name, ~0); /* Create callback params. 
/*
 * Length of the first line in buf[0..buf_len), including its '\n'
 * terminator. Returns 0 when no complete (newline-terminated) line is
 * present in the buffer.
 */
static size_t mg_get_line_len(const char *buf, size_t buf_len) {
  size_t i;
  for (i = 0; i < buf_len; i++) {
    if (buf[i] == '\n') return i + 1; /* include the newline itself */
  }
  return 0; /* no terminated line yet */
}
0 : len + 1; } #if MG_ENABLE_HTTP_STREAMING_MULTIPART static void mg_http_multipart_begin(struct mg_connection *nc, struct http_message *hm, int req_len) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); struct mg_str *ct; struct mbuf *io = &nc->recv_mbuf; void *user_data = nc->user_data; char boundary[100]; int boundary_len; ct = mg_get_http_header(hm, "Content-Type"); if (ct == NULL) { /* We need more data - or it isn't multipart mesage */ goto exit_mp; } /* Content-type should start with "multipart" */ if (ct->len < 9 || strncmp(ct->p, "multipart", 9) != 0) { goto exit_mp; } boundary_len = mg_http_parse_header(ct, "boundary", boundary, sizeof(boundary)); if (boundary_len == 0) { /* * Content type is multipart, but there is no boundary, * probably malformed request */ nc->flags = MG_F_CLOSE_IMMEDIATELY; DBG(("invalid request")); goto exit_mp; } /* If we reach this place - that is multipart request */ if (pd->mp_stream.boundary != NULL) { /* * Another streaming request was in progress, * looks like protocol error */ nc->flags |= MG_F_CLOSE_IMMEDIATELY; } else { struct mg_http_endpoint *ep = NULL; pd->mp_stream.state = MPS_BEGIN; pd->mp_stream.boundary = strdup(boundary); pd->mp_stream.boundary_len = strlen(boundary); pd->mp_stream.var_name = pd->mp_stream.file_name = NULL; pd->endpoint_handler = nc->handler; ep = mg_http_get_endpoint_handler(nc->listener, &hm->uri); if (ep != NULL) { pd->endpoint_handler = ep->handler; #if MG_ENABLE_CALLBACK_USERDATA user_data = ep->user_data; #endif } mg_call(nc, pd->endpoint_handler, user_data, MG_EV_HTTP_MULTIPART_REQUEST, hm); mbuf_remove(io, req_len); } exit_mp: ; } #define CONTENT_DISPOSITION "Content-Disposition: " static void mg_http_multipart_call_handler(struct mg_connection *c, int ev, const char *data, size_t data_len) { struct mg_http_multipart_part mp; struct mg_http_proto_data *pd = mg_http_get_proto_data(c); memset(&mp, 0, sizeof(mp)); mp.var_name = pd->mp_stream.var_name; mp.file_name = 
pd->mp_stream.file_name; mp.user_data = pd->mp_stream.user_data; mp.data.p = data; mp.data.len = data_len; mg_call(c, pd->endpoint_handler, c->user_data, ev, &mp); pd->mp_stream.user_data = mp.user_data; } static int mg_http_multipart_got_chunk(struct mg_connection *c) { struct mg_http_proto_data *pd = mg_http_get_proto_data(c); struct mbuf *io = &c->recv_mbuf; mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_DATA, io->buf, pd->mp_stream.prev_io_len); mbuf_remove(io, pd->mp_stream.prev_io_len); pd->mp_stream.prev_io_len = 0; pd->mp_stream.state = MPS_WAITING_FOR_CHUNK; return 0; } static int mg_http_multipart_finalize(struct mg_connection *c) { struct mg_http_proto_data *pd = mg_http_get_proto_data(c); mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_END, NULL, 0); MG_FREE((void *) pd->mp_stream.file_name); pd->mp_stream.file_name = NULL; MG_FREE((void *) pd->mp_stream.var_name); pd->mp_stream.var_name = NULL; mg_http_multipart_call_handler(c, MG_EV_HTTP_MULTIPART_REQUEST_END, NULL, 0); mg_http_free_proto_data_mp_stream(&pd->mp_stream); pd->mp_stream.state = MPS_FINISHED; return 1; } static int mg_http_multipart_wait_for_boundary(struct mg_connection *c) { const char *boundary; struct mbuf *io = &c->recv_mbuf; struct mg_http_proto_data *pd = mg_http_get_proto_data(c); if ((int) io->len < pd->mp_stream.boundary_len + 2) { return 0; } boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len); if (boundary != NULL) { const char *boundary_end = (boundary + pd->mp_stream.boundary_len); if (io->len - (boundary_end - io->buf) < 4) { return 0; } if (strncmp(boundary_end, "--\r\n", 4) == 0) { pd->mp_stream.state = MPS_FINALIZE; mbuf_remove(io, (boundary_end - io->buf) + 4); } else { pd->mp_stream.state = MPS_GOT_BOUNDARY; } } else { return 0; } return 1; } static int mg_http_multipart_process_boundary(struct mg_connection *c) { int data_size; const char *boundary, *block_begin; struct mbuf *io = &c->recv_mbuf; struct mg_http_proto_data *pd = 
mg_http_get_proto_data(c); char file_name[100], var_name[100]; int line_len; boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len); block_begin = boundary + pd->mp_stream.boundary_len + 2; data_size = io->len - (block_begin - io->buf); while (data_size > 0 && (line_len = mg_get_line_len(block_begin, data_size)) != 0) { if (line_len > (int) sizeof(CONTENT_DISPOSITION) && mg_ncasecmp(block_begin, CONTENT_DISPOSITION, sizeof(CONTENT_DISPOSITION) - 1) == 0) { struct mg_str header; header.p = block_begin + sizeof(CONTENT_DISPOSITION) - 1; header.len = line_len - sizeof(CONTENT_DISPOSITION) - 1; mg_http_parse_header(&header, "name", var_name, sizeof(var_name) - 2); mg_http_parse_header(&header, "filename", file_name, sizeof(file_name) - 2); block_begin += line_len; data_size -= line_len; continue; } if (line_len == 2 && mg_ncasecmp(block_begin, "\r\n", 2) == 0) { mbuf_remove(io, block_begin - io->buf + 2); if (pd->mp_stream.processing_part != 0) { mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_END, NULL, 0); } MG_FREE((void *) pd->mp_stream.file_name); pd->mp_stream.file_name = strdup(file_name); MG_FREE((void *) pd->mp_stream.var_name); pd->mp_stream.var_name = strdup(var_name); mg_http_multipart_call_handler(c, MG_EV_HTTP_PART_BEGIN, NULL, 0); pd->mp_stream.state = MPS_WAITING_FOR_CHUNK; pd->mp_stream.processing_part++; return 1; } block_begin += line_len; } pd->mp_stream.state = MPS_WAITING_FOR_BOUNDARY; return 0; } static int mg_http_multipart_continue_wait_for_chunk(struct mg_connection *c) { struct mg_http_proto_data *pd = mg_http_get_proto_data(c); struct mbuf *io = &c->recv_mbuf; const char *boundary; if ((int) io->len < pd->mp_stream.boundary_len + 6 /* \r\n, --, -- */) { return 0; } boundary = c_strnstr(io->buf, pd->mp_stream.boundary, io->len); if (boundary == NULL && pd->mp_stream.prev_io_len == 0) { pd->mp_stream.prev_io_len = io->len; return 0; } else if (boundary == NULL && (int) io->len > pd->mp_stream.prev_io_len + pd->mp_stream.boundary_len 
/*
 * Map an HTTP status code to its reason phrase.
 * Unknown codes fall back to "OK", matching the historical behavior of
 * this function. The extended code list is only compiled in when
 * MG_ENABLE_EXTRA_ERRORS_DESC is set.
 */
const char *mg_status_message(int status_code) {
  /* Code -> reason-phrase table, scanned linearly; the list is short and
   * this path is not performance-critical. */
  static const struct {
    int code;
    const char *msg;
  } k_msgs[] = {
      {206, "Partial Content"},
      {301, "Moved"},
      {302, "Found"},
      {400, "Bad Request"},
      {401, "Unauthorized"},
      {403, "Forbidden"},
      {404, "Not Found"},
      {416, "Requested Range Not Satisfiable"},
      {418, "I'm a teapot"},
      {500, "Internal Server Error"},
      {502, "Bad Gateway"},
      {503, "Service Unavailable"},
#if MG_ENABLE_EXTRA_ERRORS_DESC
      {100, "Continue"},
      {101, "Switching Protocols"},
      {102, "Processing"},
      {200, "OK"},
      {201, "Created"},
      {202, "Accepted"},
      {203, "Non-Authoritative Information"},
      {204, "No Content"},
      {205, "Reset Content"},
      {207, "Multi-Status"},
      {208, "Already Reported"},
      {226, "IM Used"},
      {300, "Multiple Choices"},
      {303, "See Other"},
      {304, "Not Modified"},
      {305, "Use Proxy"},
      {306, "Switch Proxy"},
      {307, "Temporary Redirect"},
      {308, "Permanent Redirect"},
      {402, "Payment Required"},
      {405, "Method Not Allowed"},
      {406, "Not Acceptable"},
      {407, "Proxy Authentication Required"},
      {408, "Request Timeout"},
      {409, "Conflict"},
      {410, "Gone"},
      {411, "Length Required"},
      {412, "Precondition Failed"},
      {413, "Payload Too Large"},
      {414, "URI Too Long"},
      {415, "Unsupported Media Type"},
      {417, "Expectation Failed"},
      {422, "Unprocessable Entity"},
      {423, "Locked"},
      {424, "Failed Dependency"},
      {426, "Upgrade Required"},
      {428, "Precondition Required"},
      {429, "Too Many Requests"},
      {431, "Request Header Fields Too Large"},
      {451, "Unavailable For Legal Reasons"},
      {501, "Not Implemented"},
      {504, "Gateway Timeout"},
      {505, "HTTP Version Not Supported"},
      {506, "Variant Also Negotiates"},
      {507, "Insufficient Storage"},
      {508, "Loop Detected"},
      {510, "Not Extended"},
      {511, "Network Authentication Required"},
#endif /* MG_ENABLE_EXTRA_ERRORS_DESC */
  };
  size_t i;
  for (i = 0; i < sizeof(k_msgs) / sizeof(k_msgs[0]); i++) {
    if (k_msgs[i].code == status_code) return k_msgs[i].msg;
  }
  return "OK";
}
(int) extra_headers.len, extra_headers.p); } } void mg_send_response_line(struct mg_connection *nc, int status_code, const char *extra_headers) { mg_send_response_line_s(nc, status_code, mg_mk_str(extra_headers)); } void mg_http_send_redirect(struct mg_connection *nc, int status_code, const struct mg_str location, const struct mg_str extra_headers) { char bbody[100], *pbody = bbody; int bl = mg_asprintf(&pbody, sizeof(bbody), "<p>Moved <a href='%.*s'>here</a>.\r\n", (int) location.len, location.p); char bhead[150], *phead = bhead; mg_asprintf(&phead, sizeof(bhead), "Location: %.*s\r\n" "Content-Type: text/html\r\n" "Content-Length: %d\r\n" "Cache-Control: no-cache\r\n" "%.*s%s", (int) location.len, location.p, bl, (int) extra_headers.len, extra_headers.p, (extra_headers.len > 0 ? "\r\n" : "")); mg_send_response_line(nc, status_code, phead); if (phead != bhead) MG_FREE(phead); mg_send(nc, pbody, bl); if (pbody != bbody) MG_FREE(pbody); } void mg_send_head(struct mg_connection *c, int status_code, int64_t content_length, const char *extra_headers) { mg_send_response_line(c, status_code, extra_headers); if (content_length < 0) { mg_printf(c, "%s", "Transfer-Encoding: chunked\r\n"); } else { mg_printf(c, "Content-Length: %" INT64_FMT "\r\n", content_length); } mg_send(c, "\r\n", 2); } void mg_http_send_error(struct mg_connection *nc, int code, const char *reason) { if (!reason) reason = mg_status_message(code); LOG(LL_DEBUG, ("%p %d %s", nc, code, reason)); mg_send_head(nc, code, strlen(reason), "Content-Type: text/plain\r\nConnection: close"); mg_send(nc, reason, strlen(reason)); nc->flags |= MG_F_SEND_AND_CLOSE; } #if MG_ENABLE_FILESYSTEM static void mg_http_construct_etag(char *buf, size_t buf_len, const cs_stat_t *st) { snprintf(buf, buf_len, "\"%lx.%" INT64_FMT "\"", (unsigned long) st->st_mtime, (int64_t) st->st_size); } #ifndef WINCE static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t) { strftime(buf, buf_len, "%a, %d %b %Y %H:%M:%S GMT", 
gmtime(t)); } #else /* Look wince_lib.c for WindowsCE implementation */ static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t); #endif static int mg_http_parse_range_header(const struct mg_str *header, int64_t *a, int64_t *b) { /* * There is no snscanf. Headers are not guaranteed to be NUL-terminated, * so we have this. Ugh. */ int result; char *p = (char *) MG_MALLOC(header->len + 1); if (p == NULL) return 0; memcpy(p, header->p, header->len); p[header->len] = '\0'; result = sscanf(p, "bytes=%" INT64_FMT "-%" INT64_FMT, a, b); MG_FREE(p); return result; } void mg_http_serve_file(struct mg_connection *nc, struct http_message *hm, const char *path, const struct mg_str mime_type, const struct mg_str extra_headers) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); cs_stat_t st; LOG(LL_DEBUG, ("%p [%s] %.*s", nc, path, (int) mime_type.len, mime_type.p)); if (mg_stat(path, &st) != 0 || (pd->file.fp = mg_fopen(path, "rb")) == NULL) { int code, err = mg_get_errno(); switch (err) { case EACCES: code = 403; break; case ENOENT: code = 404; break; default: code = 500; }; mg_http_send_error(nc, code, "Open failed"); } else { char etag[50], current_time[50], last_modified[50], range[70]; time_t t = (time_t) mg_time(); int64_t r1 = 0, r2 = 0, cl = st.st_size; struct mg_str *range_hdr = mg_get_http_header(hm, "Range"); int n, status_code = 200; /* Handle Range header */ range[0] = '\0'; if (range_hdr != NULL && (n = mg_http_parse_range_header(range_hdr, &r1, &r2)) > 0 && r1 >= 0 && r2 >= 0) { /* If range is specified like "400-", set second limit to content len */ if (n == 1) { r2 = cl - 1; } if (r1 > r2 || r2 >= cl) { status_code = 416; cl = 0; snprintf(range, sizeof(range), "Content-Range: bytes */%" INT64_FMT "\r\n", (int64_t) st.st_size); } else { status_code = 206; cl = r2 - r1 + 1; snprintf(range, sizeof(range), "Content-Range: bytes %" INT64_FMT "-%" INT64_FMT "/%" INT64_FMT "\r\n", r1, r1 + cl - 1, (int64_t) st.st_size); #if _FILE_OFFSET_BITS == 
#define HEXTOI(x) (isdigit(x) ? (x) - '0' : (x) - 'W')

/*
 * Percent-decode `src` (src_len bytes) into `dst` (dst_len bytes),
 * always NUL-terminating the output. When `is_form_url_encoded` is set,
 * '+' is additionally decoded to ' ' (application/x-www-form-urlencoded).
 *
 * Returns the number of decoded bytes written (excluding the NUL), or -1
 * when a '%' escape is malformed/truncated or `dst` is too small to hold
 * the whole decoded input.
 *
 * Fix: the old code wrote the terminating NUL through dst[0] even when
 * dst_len was 0 (or negative), an out-of-bounds write; such calls now
 * fail with -1 instead.
 */
int mg_url_decode(const char *src, int src_len, char *dst, int dst_len,
                  int is_form_url_encoded) {
  int i, j, a, b;

  if (dst_len <= 0) return -1; /* no room for even the NUL terminator */

  for (i = j = 0; i < src_len && j < dst_len - 1; i++, j++) {
    if (src[i] == '%') {
      /* Need two hex digits after '%'; HEXTOI relies on tolower() having
       * normalized 'A'-'F' to 'a'-'f' ('a' - 'W' == 10). */
      if (i < src_len - 2 && isxdigit(*(const unsigned char *) (src + i + 1)) &&
          isxdigit(*(const unsigned char *) (src + i + 2))) {
        a = tolower(*(const unsigned char *) (src + i + 1));
        b = tolower(*(const unsigned char *) (src + i + 2));
        dst[j] = (char) ((HEXTOI(a) << 4) | HEXTOI(b));
        i += 2;
      } else {
        return -1; /* malformed or truncated %XX escape */
      }
    } else if (is_form_url_encoded && src[i] == '+') {
      dst[j] = ' ';
    } else {
      dst[j] = src[i];
    }
  }

  dst[j] = '\0'; /* Null-terminate the destination */

  /* -1 when the loop stopped because dst filled up before src ran out */
  return i >= src_len ? j : -1;
}
{ char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem; int len; va_list ap; va_start(ap, fmt); len = mg_avprintf(&buf, sizeof(mem), fmt, ap); va_end(ap); if (len >= 0) { mg_send_http_chunk(nc, buf, len); } /* LCOV_EXCL_START */ if (buf != mem && buf != NULL) { MG_FREE(buf); } /* LCOV_EXCL_STOP */ } void mg_printf_html_escape(struct mg_connection *nc, const char *fmt, ...) { char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem; int i, j, len; va_list ap; va_start(ap, fmt); len = mg_avprintf(&buf, sizeof(mem), fmt, ap); va_end(ap); if (len >= 0) { for (i = j = 0; i < len; i++) { if (buf[i] == '<' || buf[i] == '>') { mg_send(nc, buf + j, i - j); mg_send(nc, buf[i] == '<' ? "&lt;" : "&gt;", 4); j = i + 1; } } mg_send(nc, buf + j, i - j); } /* LCOV_EXCL_START */ if (buf != mem && buf != NULL) { MG_FREE(buf); } /* LCOV_EXCL_STOP */ } int mg_http_parse_header(struct mg_str *hdr, const char *var_name, char *buf, size_t buf_size) { int ch = ' ', ch1 = ',', len = 0, n = strlen(var_name); const char *p, *end = hdr ? 
hdr->p + hdr->len : NULL, *s = NULL; if (buf != NULL && buf_size > 0) buf[0] = '\0'; if (hdr == NULL) return 0; /* Find where variable starts */ for (s = hdr->p; s != NULL && s + n < end; s++) { if ((s == hdr->p || s[-1] == ch || s[-1] == ch1 || s[-1] == ';') && s[n] == '=' && !strncmp(s, var_name, n)) break; } if (s != NULL && &s[n + 1] < end) { s += n + 1; if (*s == '"' || *s == '\'') { ch = ch1 = *s++; } p = s; while (p < end && p[0] != ch && p[0] != ch1 && len < (int) buf_size) { if (ch != ' ' && p[0] == '\\' && p[1] == ch) p++; buf[len++] = *p++; } if (len >= (int) buf_size || (ch != ' ' && *p != ch)) { len = 0; } else { if (len > 0 && s[len - 1] == ',') len--; if (len > 0 && s[len - 1] == ';') len--; buf[len] = '\0'; } } return len; } int mg_get_http_basic_auth(struct http_message *hm, char *user, size_t user_len, char *pass, size_t pass_len) { struct mg_str *hdr = mg_get_http_header(hm, "Authorization"); if (hdr == NULL) return -1; return mg_parse_http_basic_auth(hdr, user, user_len, pass, pass_len); } int mg_parse_http_basic_auth(struct mg_str *hdr, char *user, size_t user_len, char *pass, size_t pass_len) { char *buf = NULL; char fmt[64]; int res = 0; if (mg_strncmp(*hdr, mg_mk_str("Basic "), 6) != 0) return -1; buf = (char *) MG_MALLOC(hdr->len); cs_base64_decode((unsigned char *) hdr->p + 6, hdr->len, buf, NULL); /* e.g. 
"%123[^:]:%321[^\n]" */ snprintf(fmt, sizeof(fmt), "%%%" SIZE_T_FMT "[^:]:%%%" SIZE_T_FMT "[^\n]", user_len - 1, pass_len - 1); if (sscanf(buf, fmt, user, pass) == 0) { res = -1; } MG_FREE(buf); return res; } #if MG_ENABLE_FILESYSTEM static int mg_is_file_hidden(const char *path, const struct mg_serve_http_opts *opts, int exclude_specials) { const char *p1 = opts->per_directory_auth_file; const char *p2 = opts->hidden_file_pattern; /* Strip directory path from the file name */ const char *pdir = strrchr(path, DIRSEP); if (pdir != NULL) { path = pdir + 1; } return (exclude_specials && (!strcmp(path, ".") || !strcmp(path, ".."))) || (p1 != NULL && mg_match_prefix(p1, strlen(p1), path) == (int) strlen(p1)) || (p2 != NULL && mg_match_prefix(p2, strlen(p2), path) > 0); } #if !MG_DISABLE_HTTP_DIGEST_AUTH static void mg_mkmd5resp(const char *method, size_t method_len, const char *uri, size_t uri_len, const char *ha1, size_t ha1_len, const char *nonce, size_t nonce_len, const char *nc, size_t nc_len, const char *cnonce, size_t cnonce_len, const char *qop, size_t qop_len, char *resp) { static const char colon[] = ":"; static const size_t one = 1; char ha2[33]; cs_md5(ha2, method, method_len, colon, one, uri, uri_len, NULL); cs_md5(resp, ha1, ha1_len, colon, one, nonce, nonce_len, colon, one, nc, nc_len, colon, one, cnonce, cnonce_len, colon, one, qop, qop_len, colon, one, ha2, sizeof(ha2) - 1, NULL); } int mg_http_create_digest_auth_header(char *buf, size_t buf_len, const char *method, const char *uri, const char *auth_domain, const char *user, const char *passwd) { static const char colon[] = ":", qop[] = "auth"; static const size_t one = 1; char ha1[33], resp[33], cnonce[40]; snprintf(cnonce, sizeof(cnonce), "%x", (unsigned int) mg_time()); cs_md5(ha1, user, (size_t) strlen(user), colon, one, auth_domain, (size_t) strlen(auth_domain), colon, one, passwd, (size_t) strlen(passwd), NULL); mg_mkmd5resp(method, strlen(method), uri, strlen(uri), ha1, sizeof(ha1) - 1, cnonce, 
/*
 * Check for authentication timeout.
 * Clients send a time stamp encoded in the nonce. Make sure it is not too
 * old, to prevent replay attacks.
 * Assumption: nonce is a hexadecimal number of seconds since 1970.
 *
 * Fix: nonces dated in the future are now rejected. The old check
 * (`now < val || ...`) accepted ANY future timestamp, so a forged nonce
 * set far in the future could be replayed indefinitely, defeating the
 * replay protection entirely.
 */
static int mg_check_nonce(const char *nonce) {
  unsigned long now = (unsigned long) mg_time();
  unsigned long val = (unsigned long) strtoul(nonce, NULL, 16);
  /* Accept only nonces from the past hour */
  return now >= val && now - val < 3600;
}
each line should be a colon-separated sequence: * USER_NAME:DOMAIN_NAME:HA1_HASH_OF_USER_DOMAIN_AND_PASSWORD */ while (fgets(buf, sizeof(buf), fp) != NULL) { if (sscanf(buf, "%[^:]:%[^:]:%s", f_user, f_domain, f_ha1) == 3 && strcmp(user, f_user) == 0 && /* NOTE(lsm): due to a bug in MSIE, we do not compare URIs */ strcmp(auth_domain, f_domain) == 0) { /* User and domain matched, check the password */ mg_mkmd5resp( hm->method.p, hm->method.len, hm->uri.p, hm->uri.len + (hm->query_string.len ? hm->query_string.len + 1 : 0), f_ha1, strlen(f_ha1), nonce, strlen(nonce), nc, strlen(nc), cnonce, strlen(cnonce), qop, strlen(qop), expected_response); return mg_casecmp(response, expected_response) == 0; } } /* None of the entries in the passwords file matched - return failure */ return 0; } static int mg_is_authorized(struct http_message *hm, const char *path, int is_directory, const char *domain, const char *passwords_file, int is_global_pass_file) { char buf[MG_MAX_PATH]; const char *p; FILE *fp; int authorized = 1; if (domain != NULL && passwords_file != NULL) { if (is_global_pass_file) { fp = mg_fopen(passwords_file, "r"); } else if (is_directory) { snprintf(buf, sizeof(buf), "%s%c%s", path, DIRSEP, passwords_file); fp = mg_fopen(buf, "r"); } else { p = strrchr(path, DIRSEP); if (p == NULL) p = path; snprintf(buf, sizeof(buf), "%.*s%c%s", (int) (p - path), path, DIRSEP, passwords_file); fp = mg_fopen(buf, "r"); } if (fp != NULL) { authorized = mg_http_check_digest_auth(hm, domain, fp); fclose(fp); } } LOG(LL_DEBUG, ("%s '%s' %d %d", path, passwords_file ? 
/*
 * Percent-encode src[0..s_len) into dst (dst_len bytes, always
 * NUL-terminated on return when dst_len > 0). Alphanumerics and the
 * characters in `dont_escape` pass through unchanged; everything else
 * becomes %XX with lowercase hex. Output stops early (silently
 * truncating) when dst runs out of room.
 * Returns the number of bytes written, excluding the NUL.
 *
 * Fix: the old code wrote the terminating NUL through dst[0] even when
 * dst_len == 0 — an out-of-bounds write; that case now returns 0 without
 * touching dst.
 */
static size_t mg_url_encode(const char *src, size_t s_len, char *dst,
                            size_t dst_len) {
  static const char *dont_escape = "._-$,;~()/";
  static const char *hex = "0123456789abcdef";
  size_t i = 0, j = 0;

  if (dst_len == 0) return 0; /* no room even for the NUL terminator */

  /* j + 2 < dst_len - 1 leaves room for a full %XX plus the NUL */
  for (i = j = 0; i < s_len && j + 2 < dst_len - 1; i++, j++) {
    unsigned char c = *(const unsigned char *) (src + i);
    if (isalnum(c) || strchr(dont_escape, (int) c) != NULL) {
      dst[j] = (char) c;
    } else if (j + 3 < dst_len) { /* always true given the loop bound */
      dst[j] = '%';
      dst[j + 1] = hex[c >> 4];
      dst[j + 2] = hex[c & 0xf];
      j += 2;
    }
  }

  dst[j] = '\0';
  return j;
}
*/ if (fsize < 1024) { snprintf(size, sizeof(size), "%d", (int) fsize); } else if (fsize < 0x100000) { snprintf(size, sizeof(size), "%.1fk", (double) fsize / 1024.0); } else if (fsize < 0x40000000) { snprintf(size, sizeof(size), "%.1fM", (double) fsize / 1048576); } else { snprintf(size, sizeof(size), "%.1fG", (double) fsize / 1073741824); } } strftime(mod, sizeof(mod), "%d-%b-%Y %H:%M", localtime(&stp->st_mtime)); mg_escape(file_name, path, sizeof(path)); mg_url_encode(file_name, strlen(file_name), href, sizeof(href)); mg_printf_http_chunk(nc, "<tr><td><a href=\"%s%s\">%s%s</a></td>" "<td>%s</td><td name=%" INT64_FMT ">%s</td></tr>\n", href, slash, path, slash, mod, is_dir ? -1 : fsize, size); } static void mg_scan_directory(struct mg_connection *nc, const char *dir, const struct mg_serve_http_opts *opts, void (*func)(struct mg_connection *, const char *, cs_stat_t *)) { char path[MAX_PATH_SIZE]; cs_stat_t st; struct dirent *dp; DIR *dirp; LOG(LL_DEBUG, ("%p [%s]", nc, dir)); if ((dirp = (opendir(dir))) != NULL) { while ((dp = readdir(dirp)) != NULL) { /* Do not show current dir and hidden files */ if (mg_is_file_hidden((const char *) dp->d_name, opts, 1)) { continue; } snprintf(path, sizeof(path), "%s/%s", dir, dp->d_name); if (mg_stat(path, &st) == 0) { func(nc, (const char *) dp->d_name, &st); } } closedir(dirp); } else { LOG(LL_DEBUG, ("%p opendir(%s) -> %d", nc, dir, mg_get_errno())); } } static void mg_send_directory_listing(struct mg_connection *nc, const char *dir, struct http_message *hm, struct mg_serve_http_opts *opts) { static const char *sort_js_code = "<script>function srt(tb, sc, so, d) {" "var tr = Array.prototype.slice.call(tb.rows, 0)," "tr = tr.sort(function (a, b) { var c1 = a.cells[sc], c2 = b.cells[sc]," "n1 = c1.getAttribute('name'), n2 = c2.getAttribute('name'), " "t1 = a.cells[2].getAttribute('name'), " "t2 = b.cells[2].getAttribute('name'); " "return so * (t1 < 0 && t2 >= 0 ? -1 : t2 < 0 && t1 >= 0 ? 1 : " "n1 ? 
parseInt(n2) - parseInt(n1) : " "c1.textContent.trim().localeCompare(c2.textContent.trim())); });"; static const char *sort_js_code2 = "for (var i = 0; i < tr.length; i++) tb.appendChild(tr[i]); " "if (!d) window.location.hash = ('sc=' + sc + '&so=' + so); " "};" "window.onload = function() {" "var tb = document.getElementById('tb');" "var m = /sc=([012]).so=(1|-1)/.exec(window.location.hash) || [0, 2, 1];" "var sc = m[1], so = m[2]; document.onclick = function(ev) { " "var c = ev.target.rel; if (c) {if (c == sc) so *= -1; srt(tb, c, so); " "sc = c; ev.preventDefault();}};" "srt(tb, sc, so, true);" "}" "</script>"; mg_send_response_line(nc, 200, opts->extra_headers); mg_printf(nc, "%s: %s\r\n%s: %s\r\n\r\n", "Transfer-Encoding", "chunked", "Content-Type", "text/html; charset=utf-8"); mg_printf_http_chunk( nc, "<html><head><title>Index of %.*s</title>%s%s" "<style>th,td {text-align: left; padding-right: 1em; " "font-family: monospace; }</style></head>\n" "<body><h1>Index of %.*s</h1>\n<table cellpadding=0><thead>" "<tr><th><a href=# rel=0>Name</a></th><th>" "<a href=# rel=1>Modified</a</th>" "<th><a href=# rel=2>Size</a></th></tr>" "<tr><td colspan=3><hr></td></tr>\n" "</thead>\n" "<tbody id=tb>", (int) hm->uri.len, hm->uri.p, sort_js_code, sort_js_code2, (int) hm->uri.len, hm->uri.p); mg_scan_directory(nc, dir, opts, mg_print_dir_entry); mg_printf_http_chunk(nc, "</tbody><tr><td colspan=3><hr></td></tr>\n" "</table>\n" "<address>%s</address>\n" "</body></html>", mg_version_header); mg_send_http_chunk(nc, "", 0); /* TODO(rojer): Remove when cesanta/dev/issues/197 is fixed. */ nc->flags |= MG_F_SEND_AND_CLOSE; } #endif /* MG_ENABLE_DIRECTORY_LISTING */ /* * Given a directory path, find one of the files specified in the * comma-separated list of index files `list`. * First found index file wins. If an index file is found, then gets * appended to the `path`, stat-ed, and result of `stat()` passed to `stp`. 
* If index file is not found, then `path` and `stp` remain unchanged. */ MG_INTERNAL void mg_find_index_file(const char *path, const char *list, char **index_file, cs_stat_t *stp) { struct mg_str vec; size_t path_len = strlen(path); int found = 0; *index_file = NULL; /* Traverse index files list. For each entry, append it to the given */ /* path and see if the file exists. If it exists, break the loop */ while ((list = mg_next_comma_list_entry(list, &vec, NULL)) != NULL) { cs_stat_t st; size_t len = path_len + 1 + vec.len + 1; *index_file = (char *) MG_REALLOC(*index_file, len); if (*index_file == NULL) break; snprintf(*index_file, len, "%s%c%.*s", path, DIRSEP, (int) vec.len, vec.p); /* Does it exist? Is it a file? */ if (mg_stat(*index_file, &st) == 0 && S_ISREG(st.st_mode)) { /* Yes it does, break the loop */ *stp = st; found = 1; break; } } if (!found) { MG_FREE(*index_file); *index_file = NULL; } LOG(LL_DEBUG, ("[%s] [%s]", path, (*index_file ? *index_file : ""))); } #if MG_ENABLE_HTTP_URL_REWRITES static int mg_http_send_port_based_redirect( struct mg_connection *c, struct http_message *hm, const struct mg_serve_http_opts *opts) { const char *rewrites = opts->url_rewrites; struct mg_str a, b; char local_port[20] = {'%'}; mg_conn_addr_to_str(c, local_port + 1, sizeof(local_port) - 1, MG_SOCK_STRINGIFY_PORT); while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) { if (mg_vcmp(&a, local_port) == 0) { mg_send_response_line(c, 301, NULL); mg_printf(c, "Content-Length: 0\r\nLocation: %.*s%.*s\r\n\r\n", (int) b.len, b.p, (int) (hm->proto.p - hm->uri.p - 1), hm->uri.p); return 1; } } return 0; } static void mg_reverse_proxy_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { struct http_message *hm = (struct http_message *) ev_data; struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); if (pd == NULL || pd->reverse_proxy_data.linked_conn == NULL) { DBG(("%p: upstream closed", nc)); return; } switch (ev) { case 
MG_EV_CONNECT: if (*(int *) ev_data != 0) { mg_http_send_error(pd->reverse_proxy_data.linked_conn, 502, NULL); } break; /* TODO(mkm): handle streaming */ case MG_EV_HTTP_REPLY: mg_send(pd->reverse_proxy_data.linked_conn, hm->message.p, hm->message.len); pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE; nc->flags |= MG_F_CLOSE_IMMEDIATELY; break; case MG_EV_CLOSE: pd->reverse_proxy_data.linked_conn->flags |= MG_F_SEND_AND_CLOSE; break; } #if MG_ENABLE_CALLBACK_USERDATA (void) user_data; #endif } void mg_http_reverse_proxy(struct mg_connection *nc, const struct http_message *hm, struct mg_str mount, struct mg_str upstream) { struct mg_connection *be; char burl[256], *purl = burl; char *addr = NULL; const char *path = NULL; int i; const char *error; struct mg_connect_opts opts; memset(&opts, 0, sizeof(opts)); opts.error_string = &error; mg_asprintf(&purl, sizeof(burl), "%.*s%.*s", (int) upstream.len, upstream.p, (int) (hm->uri.len - mount.len), hm->uri.p + mount.len); be = mg_connect_http_base(nc->mgr, MG_CB(mg_reverse_proxy_handler, NULL), opts, "http://", "https://", purl, &path, NULL /* user */, NULL /* pass */, &addr); LOG(LL_DEBUG, ("Proxying %.*s to %s (rule: %.*s)", (int) hm->uri.len, hm->uri.p, purl, (int) mount.len, mount.p)); if (be == NULL) { LOG(LL_ERROR, ("Error connecting to %s: %s", purl, error)); mg_http_send_error(nc, 502, NULL); goto cleanup; } /* link connections to each other, they must live and die together */ mg_http_get_proto_data(be)->reverse_proxy_data.linked_conn = nc; mg_http_get_proto_data(nc)->reverse_proxy_data.linked_conn = be; /* send request upstream */ mg_printf(be, "%.*s %s HTTP/1.1\r\n", (int) hm->method.len, hm->method.p, path); mg_printf(be, "Host: %s\r\n", addr); for (i = 0; i < MG_MAX_HTTP_HEADERS && hm->header_names[i].len > 0; i++) { struct mg_str hn = hm->header_names[i]; struct mg_str hv = hm->header_values[i]; /* we rewrite the host header */ if (mg_vcasecmp(&hn, "Host") == 0) continue; /* * Don't pass 
chunked transfer encoding to the client because hm->body is * already dechunked when we arrive here. */ if (mg_vcasecmp(&hn, "Transfer-encoding") == 0 && mg_vcasecmp(&hv, "chunked") == 0) { mg_printf(be, "Content-Length: %" SIZE_T_FMT "\r\n", hm->body.len); continue; } /* We don't support proxying Expect: 100-continue. */ if (mg_vcasecmp(&hn, "Expect") == 0 && mg_vcasecmp(&hv, "100-continue") == 0) { continue; } mg_printf(be, "%.*s: %.*s\r\n", (int) hn.len, hn.p, (int) hv.len, hv.p); } mg_send(be, "\r\n", 2); mg_send(be, hm->body.p, hm->body.len); cleanup: if (purl != burl) MG_FREE(purl); } static int mg_http_handle_forwarding(struct mg_connection *nc, struct http_message *hm, const struct mg_serve_http_opts *opts) { const char *rewrites = opts->url_rewrites; struct mg_str a, b; struct mg_str p1 = MG_MK_STR("http://"), p2 = MG_MK_STR("https://"); while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) { if (mg_strncmp(a, hm->uri, a.len) == 0) { if (mg_strncmp(b, p1, p1.len) == 0 || mg_strncmp(b, p2, p2.len) == 0) { mg_http_reverse_proxy(nc, hm, a, b); return 1; } } } return 0; } #endif MG_INTERNAL int mg_uri_to_local_path(struct http_message *hm, const struct mg_serve_http_opts *opts, char **local_path, struct mg_str *remainder) { int ok = 1; const char *cp = hm->uri.p, *cp_end = hm->uri.p + hm->uri.len; struct mg_str root = {NULL, 0}; const char *file_uri_start = cp; *local_path = NULL; remainder->p = NULL; remainder->len = 0; { /* 1. Determine which root to use. */ #if MG_ENABLE_HTTP_URL_REWRITES const char *rewrites = opts->url_rewrites; #else const char *rewrites = ""; #endif struct mg_str *hh = mg_get_http_header(hm, "Host"); struct mg_str a, b; /* Check rewrites first. */ while ((rewrites = mg_next_comma_list_entry(rewrites, &a, &b)) != NULL) { if (a.len > 1 && a.p[0] == '@') { /* Host rewrite. 
*/ if (hh != NULL && hh->len == a.len - 1 && mg_ncasecmp(a.p + 1, hh->p, a.len - 1) == 0) { root = b; break; } } else { /* Regular rewrite, URI=directory */ int match_len = mg_match_prefix_n(a, hm->uri); if (match_len > 0) { file_uri_start = hm->uri.p + match_len; if (*file_uri_start == '/' || file_uri_start == cp_end) { /* Match ended at component boundary, ok. */ } else if (*(file_uri_start - 1) == '/') { /* Pattern ends with '/', backtrack. */ file_uri_start--; } else { /* No match: must fall on the component boundary. */ continue; } root = b; break; } } } /* If no rewrite rules matched, use DAV or regular document root. */ if (root.p == NULL) { #if MG_ENABLE_HTTP_WEBDAV if (opts->dav_document_root != NULL && mg_is_dav_request(&hm->method)) { root.p = opts->dav_document_root; root.len = strlen(opts->dav_document_root); } else #endif { root.p = opts->document_root; root.len = strlen(opts->document_root); } } assert(root.p != NULL && root.len > 0); } { /* 2. Find where in the canonical URI path the local path ends. */ const char *u = file_uri_start + 1; char *lp = (char *) MG_MALLOC(root.len + hm->uri.len + 1); char *lp_end = lp + root.len + hm->uri.len + 1; char *p = lp, *ps; int exists = 1; if (lp == NULL) { ok = 0; goto out; } memcpy(p, root.p, root.len); p += root.len; if (*(p - 1) == DIRSEP) p--; *p = '\0'; ps = p; /* Chop off URI path components one by one and build local path. */ while (u <= cp_end) { const char *next = u; struct mg_str component; if (exists) { cs_stat_t st; exists = (mg_stat(lp, &st) == 0); if (exists && S_ISREG(st.st_mode)) { /* We found the terminal, the rest of the URI (if any) is path_info. 
*/ if (*(u - 1) == '/') u--; break; } } if (u >= cp_end) break; parse_uri_component((const char **) &next, cp_end, '/', &component); if (component.len > 0) { int len; memmove(p + 1, component.p, component.len); len = mg_url_decode(p + 1, component.len, p + 1, lp_end - p - 1, 0); if (len <= 0) { ok = 0; break; } component.p = p + 1; component.len = len; if (mg_vcmp(&component, ".") == 0) { /* Yum. */ } else if (mg_vcmp(&component, "..") == 0) { while (p > ps && *p != DIRSEP) p--; *p = '\0'; } else { size_t i; #ifdef _WIN32 /* On Windows, make sure it's valid Unicode (no funny stuff). */ wchar_t buf[MG_MAX_PATH * 2]; if (to_wchar(component.p, buf, MG_MAX_PATH) == 0) { DBG(("[%.*s] smells funny", (int) component.len, component.p)); ok = 0; break; } #endif *p++ = DIRSEP; /* No NULs and DIRSEPs in the component (percent-encoded). */ for (i = 0; i < component.len; i++, p++) { if (*p == '\0' || *p == DIRSEP #ifdef _WIN32 /* On Windows, "/" is also accepted, so check for that too. */ || *p == '/' #endif ) { ok = 0; break; } } } } u = next; } if (ok) { *local_path = lp; if (u > cp_end) u = cp_end; remainder->p = u; remainder->len = cp_end - u; } else { MG_FREE(lp); } } out: LOG(LL_DEBUG, ("'%.*s' -> '%s' + '%.*s'", (int) hm->uri.len, hm->uri.p, *local_path ? *local_path : "", (int) remainder->len, remainder->p)); return ok; } static int mg_get_month_index(const char *s) { static const char *month_names[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; size_t i; for (i = 0; i < ARRAY_SIZE(month_names); i++) if (!strcmp(s, month_names[i])) return (int) i; return -1; } static int mg_num_leap_years(int year) { return year / 4 - year / 100 + year / 400; } /* Parse UTC date-time string, and return the corresponding time_t value. 
*/ MG_INTERNAL time_t mg_parse_date_string(const char *datetime) { static const unsigned short days_before_month[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}; char month_str[32]; int second, minute, hour, day, month, year, leap_days, days; time_t result = (time_t) 0; if (((sscanf(datetime, "%d/%3s/%d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%d %3s %d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%*3s, %d %3s %d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6) || (sscanf(datetime, "%d-%3s-%d %d:%d:%d", &day, month_str, &year, &hour, &minute, &second) == 6)) && year > 1970 && (month = mg_get_month_index(month_str)) != -1) { leap_days = mg_num_leap_years(year) - mg_num_leap_years(1970); year -= 1970; days = year * 365 + days_before_month[month] + (day - 1) + leap_days; result = days * 24 * 3600 + hour * 3600 + minute * 60 + second; } return result; } MG_INTERNAL int mg_is_not_modified(struct http_message *hm, cs_stat_t *st) { struct mg_str *hdr; if ((hdr = mg_get_http_header(hm, "If-None-Match")) != NULL) { char etag[64]; mg_http_construct_etag(etag, sizeof(etag), st); return mg_vcasecmp(hdr, etag) == 0; } else if ((hdr = mg_get_http_header(hm, "If-Modified-Since")) != NULL) { return st->st_mtime <= mg_parse_date_string(hdr->p); } else { return 0; } } static void mg_http_send_digest_auth_request(struct mg_connection *c, const char *domain) { mg_printf(c, "HTTP/1.1 401 Unauthorized\r\n" "WWW-Authenticate: Digest qop=\"auth\", " "realm=\"%s\", nonce=\"%lu\"\r\n" "Content-Length: 0\r\n\r\n", domain, (unsigned long) mg_time()); } static void mg_http_send_options(struct mg_connection *nc) { mg_printf(nc, "%s", "HTTP/1.1 200 OK\r\nAllow: GET, POST, HEAD, CONNECT, OPTIONS" #if MG_ENABLE_HTTP_WEBDAV ", MKCOL, PUT, DELETE, PROPFIND, MOVE\r\nDAV: 1,2" #endif "\r\n\r\n"); nc->flags |= MG_F_SEND_AND_CLOSE; } static int mg_is_creation_request(const struct 
http_message *hm) { return mg_vcmp(&hm->method, "MKCOL") == 0 || mg_vcmp(&hm->method, "PUT") == 0; } MG_INTERNAL void mg_send_http_file(struct mg_connection *nc, char *path, const struct mg_str *path_info, struct http_message *hm, struct mg_serve_http_opts *opts) { int exists, is_directory, is_cgi; #if MG_ENABLE_HTTP_WEBDAV int is_dav = mg_is_dav_request(&hm->method); #else int is_dav = 0; #endif char *index_file = NULL; cs_stat_t st; exists = (mg_stat(path, &st) == 0); is_directory = exists && S_ISDIR(st.st_mode); if (is_directory) mg_find_index_file(path, opts->index_files, &index_file, &st); is_cgi = (mg_match_prefix(opts->cgi_file_pattern, strlen(opts->cgi_file_pattern), index_file ? index_file : path) > 0); LOG(LL_DEBUG, ("%p %.*s [%s] exists=%d is_dir=%d is_dav=%d is_cgi=%d index=%s", nc, (int) hm->method.len, hm->method.p, path, exists, is_directory, is_dav, is_cgi, index_file ? index_file : "")); if (is_directory && hm->uri.p[hm->uri.len - 1] != '/' && !is_dav) { mg_printf(nc, "HTTP/1.1 301 Moved\r\nLocation: %.*s/\r\n" "Content-Length: 0\r\n\r\n", (int) hm->uri.len, hm->uri.p); MG_FREE(index_file); return; } /* If we have path_info, the only way to handle it is CGI. */ if (path_info->len > 0 && !is_cgi) { mg_http_send_error(nc, 501, NULL); MG_FREE(index_file); return; } if (is_dav && opts->dav_document_root == NULL) { mg_http_send_error(nc, 501, NULL); } else if (!mg_is_authorized(hm, path, is_directory, opts->auth_domain, opts->global_auth_file, 1) || !mg_is_authorized(hm, path, is_directory, opts->auth_domain, opts->per_directory_auth_file, 0)) { mg_http_send_digest_auth_request(nc, opts->auth_domain); } else if (is_cgi) { #if MG_ENABLE_HTTP_CGI mg_handle_cgi(nc, index_file ? 
index_file : path, path_info, hm, opts); #else mg_http_send_error(nc, 501, NULL); #endif /* MG_ENABLE_HTTP_CGI */ } else if ((!exists || mg_is_file_hidden(path, opts, 0 /* specials are ok */)) && !mg_is_creation_request(hm)) { mg_http_send_error(nc, 404, NULL); #if MG_ENABLE_HTTP_WEBDAV } else if (!mg_vcmp(&hm->method, "PROPFIND")) { mg_handle_propfind(nc, path, &st, hm, opts); #if !MG_DISABLE_DAV_AUTH } else if (is_dav && (opts->dav_auth_file == NULL || (strcmp(opts->dav_auth_file, "-") != 0 && !mg_is_authorized(hm, path, is_directory, opts->auth_domain, opts->dav_auth_file, 1)))) { mg_http_send_digest_auth_request(nc, opts->auth_domain); #endif } else if (!mg_vcmp(&hm->method, "MKCOL")) { mg_handle_mkcol(nc, path, hm); } else if (!mg_vcmp(&hm->method, "DELETE")) { mg_handle_delete(nc, opts, path); } else if (!mg_vcmp(&hm->method, "PUT")) { mg_handle_put(nc, path, hm); } else if (!mg_vcmp(&hm->method, "MOVE")) { mg_handle_move(nc, opts, path, hm); #if MG_ENABLE_FAKE_DAVLOCK } else if (!mg_vcmp(&hm->method, "LOCK")) { mg_handle_lock(nc, path); #endif #endif /* MG_ENABLE_HTTP_WEBDAV */ } else if (!mg_vcmp(&hm->method, "OPTIONS")) { mg_http_send_options(nc); } else if (is_directory && index_file == NULL) { #if MG_ENABLE_DIRECTORY_LISTING if (strcmp(opts->enable_directory_listing, "yes") == 0) { mg_send_directory_listing(nc, path, hm, opts); } else { mg_http_send_error(nc, 403, NULL); } #else mg_http_send_error(nc, 501, NULL); #endif } else if (mg_is_not_modified(hm, &st)) { mg_http_send_error(nc, 304, "Not Modified"); } else { mg_http_serve_file2(nc, index_file ? 
index_file : path, hm, opts); } MG_FREE(index_file); } void mg_serve_http(struct mg_connection *nc, struct http_message *hm, struct mg_serve_http_opts opts) { char *path = NULL; struct mg_str *hdr, path_info; uint32_t remote_ip = ntohl(*(uint32_t *) &nc->sa.sin.sin_addr); if (mg_check_ip_acl(opts.ip_acl, remote_ip) != 1) { /* Not allowed to connect */ mg_http_send_error(nc, 403, NULL); nc->flags |= MG_F_SEND_AND_CLOSE; return; } #if MG_ENABLE_HTTP_URL_REWRITES if (mg_http_handle_forwarding(nc, hm, &opts)) { return; } if (mg_http_send_port_based_redirect(nc, hm, &opts)) { return; } #endif if (opts.document_root == NULL) { opts.document_root = "."; } if (opts.per_directory_auth_file == NULL) { opts.per_directory_auth_file = ".htpasswd"; } if (opts.enable_directory_listing == NULL) { opts.enable_directory_listing = "yes"; } if (opts.cgi_file_pattern == NULL) { opts.cgi_file_pattern = "**.cgi$|**.php$"; } if (opts.ssi_pattern == NULL) { opts.ssi_pattern = "**.shtml$|**.shtm$"; } if (opts.index_files == NULL) { opts.index_files = "index.html,index.htm,index.shtml,index.cgi,index.php"; } /* Normalize path - resolve "." and ".." (in-place). 
*/ if (!mg_normalize_uri_path(&hm->uri, &hm->uri)) { mg_http_send_error(nc, 400, NULL); return; } if (mg_uri_to_local_path(hm, &opts, &path, &path_info) == 0) { mg_http_send_error(nc, 404, NULL); return; } mg_send_http_file(nc, path, &path_info, hm, &opts); MG_FREE(path); path = NULL; /* Close connection for non-keep-alive requests */ if (mg_vcmp(&hm->proto, "HTTP/1.1") != 0 || ((hdr = mg_get_http_header(hm, "Connection")) != NULL && mg_vcmp(hdr, "keep-alive") != 0)) { #if 0 nc->flags |= MG_F_SEND_AND_CLOSE; #endif } } #if MG_ENABLE_HTTP_STREAMING_MULTIPART void mg_file_upload_handler(struct mg_connection *nc, int ev, void *ev_data, mg_fu_fname_fn local_name_fn MG_UD_ARG(void *user_data)) { switch (ev) { case MG_EV_HTTP_PART_BEGIN: { struct mg_http_multipart_part *mp = (struct mg_http_multipart_part *) ev_data; struct file_upload_state *fus = (struct file_upload_state *) MG_CALLOC(1, sizeof(*fus)); struct mg_str lfn = local_name_fn(nc, mg_mk_str(mp->file_name)); mp->user_data = NULL; if (lfn.p == NULL || lfn.len == 0) { LOG(LL_ERROR, ("%p Not allowed to upload %s", nc, mp->file_name)); mg_printf(nc, "HTTP/1.1 403 Not Allowed\r\n" "Content-Type: text/plain\r\n" "Connection: close\r\n\r\n" "Not allowed to upload %s\r\n", mp->file_name); nc->flags |= MG_F_SEND_AND_CLOSE; return; } fus->lfn = (char *) MG_MALLOC(lfn.len + 1); memcpy(fus->lfn, lfn.p, lfn.len); fus->lfn[lfn.len] = '\0'; if (lfn.p != mp->file_name) MG_FREE((char *) lfn.p); LOG(LL_DEBUG, ("%p Receiving file %s -> %s", nc, mp->file_name, fus->lfn)); fus->fp = mg_fopen(fus->lfn, "w"); if (fus->fp == NULL) { mg_printf(nc, "HTTP/1.1 500 Internal Server Error\r\n" "Content-Type: text/plain\r\n" "Connection: close\r\n\r\n"); LOG(LL_ERROR, ("Failed to open %s: %d\n", fus->lfn, mg_get_errno())); mg_printf(nc, "Failed to open %s: %d\n", fus->lfn, mg_get_errno()); /* Do not close the connection just yet, discard remainder of the data. 
* This is because at the time of writing some browsers (Chrome) fail to * render response before all the data is sent. */ } mp->user_data = (void *) fus; break; } case MG_EV_HTTP_PART_DATA: { struct mg_http_multipart_part *mp = (struct mg_http_multipart_part *) ev_data; struct file_upload_state *fus = (struct file_upload_state *) mp->user_data; if (fus == NULL || fus->fp == NULL) break; if (mg_fwrite(mp->data.p, 1, mp->data.len, fus->fp) != mp->data.len) { LOG(LL_ERROR, ("Failed to write to %s: %d, wrote %d", fus->lfn, mg_get_errno(), (int) fus->num_recd)); if (mg_get_errno() == ENOSPC #ifdef SPIFFS_ERR_FULL || mg_get_errno() == SPIFFS_ERR_FULL #endif ) { mg_printf(nc, "HTTP/1.1 413 Payload Too Large\r\n" "Content-Type: text/plain\r\n" "Connection: close\r\n\r\n"); mg_printf(nc, "Failed to write to %s: no space left; wrote %d\r\n", fus->lfn, (int) fus->num_recd); } else { mg_printf(nc, "HTTP/1.1 500 Internal Server Error\r\n" "Content-Type: text/plain\r\n" "Connection: close\r\n\r\n"); mg_printf(nc, "Failed to write to %s: %d, wrote %d", mp->file_name, mg_get_errno(), (int) fus->num_recd); } fclose(fus->fp); remove(fus->lfn); fus->fp = NULL; /* Do not close the connection just yet, discard remainder of the data. * This is because at the time of writing some browsers (Chrome) fail to * render response before all the data is sent. 
*/ return; } fus->num_recd += mp->data.len; LOG(LL_DEBUG, ("%p rec'd %d bytes, %d total", nc, (int) mp->data.len, (int) fus->num_recd)); break; } case MG_EV_HTTP_PART_END: { struct mg_http_multipart_part *mp = (struct mg_http_multipart_part *) ev_data; struct file_upload_state *fus = (struct file_upload_state *) mp->user_data; if (fus == NULL) break; if (mp->status >= 0 && fus->fp != NULL) { LOG(LL_DEBUG, ("%p Uploaded %s (%s), %d bytes", nc, mp->file_name, fus->lfn, (int) fus->num_recd)); mg_printf(nc, "HTTP/1.1 200 OK\r\n" "Content-Type: text/plain\r\n" "Connection: close\r\n\r\n" "Ok, %s - %d bytes.\r\n", mp->file_name, (int) fus->num_recd); } else { LOG(LL_ERROR, ("Failed to store %s (%s)", mp->file_name, fus->lfn)); /* * mp->status < 0 means connection was terminated, so no reason to send * HTTP reply */ } if (fus->fp != NULL) fclose(fus->fp); MG_FREE(fus->lfn); MG_FREE(fus); mp->user_data = NULL; nc->flags |= MG_F_SEND_AND_CLOSE; break; } } #if MG_ENABLE_CALLBACK_USERDATA (void) user_data; #endif } #endif /* MG_ENABLE_HTTP_STREAMING_MULTIPART */ #endif /* MG_ENABLE_FILESYSTEM */ /* returns 0 on success, -1 on error */ MG_INTERNAL int mg_http_common_url_parse(const char *url, const char *schema, const char *schema_tls, int *use_ssl, char **user, char **pass, char **addr, int *port_i, const char **path) { int addr_len = 0; int auth_sep_pos = -1; int user_sep_pos = -1; int port_pos = -1; (void) user; (void) pass; if (strncmp(url, schema, strlen(schema)) == 0) { url += strlen(schema); } else if (strncmp(url, schema_tls, strlen(schema_tls)) == 0) { url += strlen(schema_tls); *use_ssl = 1; #if !MG_ENABLE_SSL return -1; /* SSL is not enabled, cannot do HTTPS URLs */ #endif } while (*url != '\0') { *addr = (char *) MG_REALLOC(*addr, addr_len + 6 /* space for port too. 
*/); if (*addr == NULL) { DBG(("OOM")); return -1; } if (*url == '/') { break; } if (*url == '@') { auth_sep_pos = addr_len; user_sep_pos = port_pos; port_pos = -1; } if (*url == ':') port_pos = addr_len; (*addr)[addr_len++] = *url; (*addr)[addr_len] = '\0'; url++; } if (addr_len == 0) goto cleanup; if (port_pos < 0) { *port_i = addr_len; addr_len += sprintf(*addr + addr_len, ":%d", *use_ssl ? 443 : 80); } else { *port_i = -1; } if (*path == NULL) *path = url; if (**path == '\0') *path = "/"; if (user != NULL && pass != NULL) { if (auth_sep_pos == -1) { *user = NULL; *pass = NULL; } else { /* user is from 0 to user_sep_pos */ *user = (char *) MG_MALLOC(user_sep_pos + 1); memcpy(*user, *addr, user_sep_pos); (*user)[user_sep_pos] = '\0'; /* pass is from user_sep_pos + 1 to auth_sep_pos */ *pass = (char *) MG_MALLOC(auth_sep_pos - user_sep_pos - 1 + 1); memcpy(*pass, *addr + user_sep_pos + 1, auth_sep_pos - user_sep_pos - 1); (*pass)[auth_sep_pos - user_sep_pos - 1] = '\0'; /* move address proper to the front */ memmove(*addr, *addr + auth_sep_pos + 1, addr_len - auth_sep_pos); } } DBG(("%s %s", *addr, *path)); return 0; cleanup: MG_FREE(*addr); return -1; } struct mg_connection *mg_connect_http_base( struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data), struct mg_connect_opts opts, const char *schema, const char *schema_ssl, const char *url, const char **path, char **user, char **pass, char **addr) { struct mg_connection *nc = NULL; int port_i = -1; int use_ssl = 0; if (mg_http_common_url_parse(url, schema, schema_ssl, &use_ssl, user, pass, addr, &port_i, path) < 0) { MG_SET_PTRPTR(opts.error_string, "cannot parse url"); return NULL; } LOG(LL_DEBUG, ("%s use_ssl? %d", url, use_ssl)); if (use_ssl) { #if MG_ENABLE_SSL /* * Schema requires SSL, but no SSL parameters were provided in opts. * In order to maintain backward compatibility, use a faux-SSL with no * verification. 
*/ if (opts.ssl_ca_cert == NULL) { opts.ssl_ca_cert = "*"; } #else MG_SET_PTRPTR(opts.error_string, "ssl is disabled"); if (user != NULL) MG_FREE(*user); if (pass != NULL) MG_FREE(*pass); MG_FREE(*addr); return NULL; #endif } if ((nc = mg_connect_opt(mgr, *addr, MG_CB(ev_handler, user_data), opts)) != NULL) { mg_set_protocol_http_websocket(nc); /* If the port was addred by us, restore the original host. */ if (port_i >= 0) (*addr)[port_i] = '\0'; } return nc; } struct mg_connection *mg_connect_http_opt( struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data), struct mg_connect_opts opts, const char *url, const char *extra_headers, const char *post_data) { char *user = NULL, *pass = NULL, *addr = NULL; const char *path = NULL; struct mbuf auth; struct mg_connection *nc = mg_connect_http_base(mgr, MG_CB(ev_handler, user_data), opts, "http://", "https://", url, &path, &user, &pass, &addr); if (nc == NULL) { return NULL; } mbuf_init(&auth, 0); if (user != NULL) { mg_basic_auth_header(user, pass, &auth); } if (post_data == NULL) post_data = ""; if (extra_headers == NULL) extra_headers = ""; mg_printf(nc, "%s %s HTTP/1.1\r\nHost: %s\r\nContent-Length: %" SIZE_T_FMT "\r\n%.*s%s\r\n%s", post_data[0] == '\0' ? "GET" : "POST", path, addr, strlen(post_data), (int) auth.len, (auth.buf == NULL ? 
"" : auth.buf), extra_headers, post_data); mbuf_free(&auth); MG_FREE(user); MG_FREE(pass); MG_FREE(addr); return nc; } struct mg_connection *mg_connect_http( struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data), const char *url, const char *extra_headers, const char *post_data) { struct mg_connect_opts opts; memset(&opts, 0, sizeof(opts)); return mg_connect_http_opt(mgr, MG_CB(ev_handler, user_data), opts, url, extra_headers, post_data); } size_t mg_parse_multipart(const char *buf, size_t buf_len, char *var_name, size_t var_name_len, char *file_name, size_t file_name_len, const char **data, size_t *data_len) { static const char cd[] = "Content-Disposition: "; size_t hl, bl, n, ll, pos, cdl = sizeof(cd) - 1; if (buf == NULL || buf_len <= 0) return 0; if ((hl = mg_http_get_request_len(buf, buf_len)) <= 0) return 0; if (buf[0] != '-' || buf[1] != '-' || buf[2] == '\n') return 0; /* Get boundary length */ bl = mg_get_line_len(buf, buf_len); /* Loop through headers, fetch variable name and file name */ var_name[0] = file_name[0] = '\0'; for (n = bl; (ll = mg_get_line_len(buf + n, hl - n)) > 0; n += ll) { if (mg_ncasecmp(cd, buf + n, cdl) == 0) { struct mg_str header; header.p = buf + n + cdl; header.len = ll - (cdl + 2); mg_http_parse_header(&header, "name", var_name, var_name_len); mg_http_parse_header(&header, "filename", file_name, file_name_len); } } /* Scan through the body, search for terminating boundary */ for (pos = hl; pos + (bl - 2) < buf_len; pos++) { if (buf[pos] == '-' && !strncmp(buf, &buf[pos], bl - 2)) { if (data_len != NULL) *data_len = (pos - 2) - hl; if (data != NULL) *data = buf + hl; return pos; } } return 0; } void mg_register_http_endpoint(struct mg_connection *nc, const char *uri_path, MG_CB(mg_event_handler_t handler, void *user_data)) { struct mg_http_proto_data *pd = NULL; struct mg_http_endpoint *new_ep = NULL; if (nc == NULL) return; new_ep = (struct mg_http_endpoint *) MG_CALLOC(1, sizeof(*new_ep)); if (new_ep == NULL) 
return; pd = mg_http_get_proto_data(nc); new_ep->name = strdup(uri_path); new_ep->name_len = strlen(new_ep->name); new_ep->handler = handler; #if MG_ENABLE_CALLBACK_USERDATA new_ep->user_data = user_data; #endif new_ep->next = pd->endpoints; pd->endpoints = new_ep; } #endif /* MG_ENABLE_HTTP */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/http_cgi.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_HTTP && MG_ENABLE_HTTP_CGI #ifndef MG_MAX_CGI_ENVIR_VARS #define MG_MAX_CGI_ENVIR_VARS 64 #endif #ifndef MG_ENV_EXPORT_TO_CGI #define MG_ENV_EXPORT_TO_CGI "MONGOOSE_CGI" #endif /* * This structure helps to create an environment for the spawned CGI program. * Environment is an array of "VARIABLE=VALUE\0" ASCIIZ strings, * last element must be NULL. * However, on Windows there is a requirement that all these VARIABLE=VALUE\0 * strings must reside in a contiguous buffer. The end of the buffer is * marked by two '\0' characters. * We satisfy both worlds: we create an envp array (which is vars), all * entries are actually pointers inside buf. */ struct mg_cgi_env_block { struct mg_connection *nc; char buf[MG_CGI_ENVIRONMENT_SIZE]; /* Environment buffer */ const char *vars[MG_MAX_CGI_ENVIR_VARS]; /* char *envp[] */ int len; /* Space taken */ int nvars; /* Number of variables in envp[] */ }; #ifdef _WIN32 struct mg_threadparam { sock_t s; HANDLE hPipe; }; static int mg_wait_until_ready(sock_t sock, int for_read) { fd_set set; FD_ZERO(&set); FD_SET(sock, &set); return select(sock + 1, for_read ? &set : 0, for_read ? 
0 : &set, 0, 0) == 1; } static void *mg_push_to_stdin(void *arg) { struct mg_threadparam *tp = (struct mg_threadparam *) arg; int n, sent, stop = 0; DWORD k; char buf[BUFSIZ]; while (!stop && mg_wait_until_ready(tp->s, 1) && (n = recv(tp->s, buf, sizeof(buf), 0)) > 0) { if (n == -1 && GetLastError() == WSAEWOULDBLOCK) continue; for (sent = 0; !stop && sent < n; sent += k) { if (!WriteFile(tp->hPipe, buf + sent, n - sent, &k, 0)) stop = 1; } } DBG(("%s", "FORWARED EVERYTHING TO CGI")); CloseHandle(tp->hPipe); MG_FREE(tp); return NULL; } static void *mg_pull_from_stdout(void *arg) { struct mg_threadparam *tp = (struct mg_threadparam *) arg; int k = 0, stop = 0; DWORD n, sent; char buf[BUFSIZ]; while (!stop && ReadFile(tp->hPipe, buf, sizeof(buf), &n, NULL)) { for (sent = 0; !stop && sent < n; sent += k) { if (mg_wait_until_ready(tp->s, 0) && (k = send(tp->s, buf + sent, n - sent, 0)) <= 0) stop = 1; } } DBG(("%s", "EOF FROM CGI")); CloseHandle(tp->hPipe); shutdown(tp->s, 2); // Without this, IO thread may get truncated data closesocket(tp->s); MG_FREE(tp); return NULL; } static void mg_spawn_stdio_thread(sock_t sock, HANDLE hPipe, void *(*func)(void *)) { struct mg_threadparam *tp = (struct mg_threadparam *) MG_MALLOC(sizeof(*tp)); if (tp != NULL) { tp->s = sock; tp->hPipe = hPipe; mg_start_thread(func, tp); } } static void mg_abs_path(const char *utf8_path, char *abs_path, size_t len) { wchar_t buf[MAX_PATH_SIZE], buf2[MAX_PATH_SIZE]; to_wchar(utf8_path, buf, ARRAY_SIZE(buf)); GetFullPathNameW(buf, ARRAY_SIZE(buf2), buf2, NULL); WideCharToMultiByte(CP_UTF8, 0, buf2, wcslen(buf2) + 1, abs_path, len, 0, 0); } static int mg_start_process(const char *interp, const char *cmd, const char *env, const char *envp[], const char *dir, sock_t sock) { STARTUPINFOW si; PROCESS_INFORMATION pi; HANDLE a[2], b[2], me = GetCurrentProcess(); wchar_t wcmd[MAX_PATH_SIZE], full_dir[MAX_PATH_SIZE]; char buf[MAX_PATH_SIZE], buf2[MAX_PATH_SIZE], buf5[MAX_PATH_SIZE], buf4[MAX_PATH_SIZE], 
cmdline[MAX_PATH_SIZE]; DWORD flags = DUPLICATE_CLOSE_SOURCE | DUPLICATE_SAME_ACCESS; FILE *fp; memset(&si, 0, sizeof(si)); memset(&pi, 0, sizeof(pi)); si.cb = sizeof(si); si.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; si.wShowWindow = SW_HIDE; si.hStdError = GetStdHandle(STD_ERROR_HANDLE); CreatePipe(&a[0], &a[1], NULL, 0); CreatePipe(&b[0], &b[1], NULL, 0); DuplicateHandle(me, a[0], me, &si.hStdInput, 0, TRUE, flags); DuplicateHandle(me, b[1], me, &si.hStdOutput, 0, TRUE, flags); if (interp == NULL && (fp = mg_fopen(cmd, "r")) != NULL) { buf[0] = buf[1] = '\0'; fgets(buf, sizeof(buf), fp); buf[sizeof(buf) - 1] = '\0'; if (buf[0] == '#' && buf[1] == '!') { interp = buf + 2; /* Trim leading spaces: https://github.com/cesanta/mongoose/issues/489 */ while (*interp != '\0' && isspace(*(unsigned char *) interp)) { interp++; } } fclose(fp); } snprintf(buf, sizeof(buf), "%s/%s", dir, cmd); mg_abs_path(buf, buf2, ARRAY_SIZE(buf2)); mg_abs_path(dir, buf5, ARRAY_SIZE(buf5)); to_wchar(dir, full_dir, ARRAY_SIZE(full_dir)); if (interp != NULL) { mg_abs_path(interp, buf4, ARRAY_SIZE(buf4)); snprintf(cmdline, sizeof(cmdline), "%s \"%s\"", buf4, buf2); } else { snprintf(cmdline, sizeof(cmdline), "\"%s\"", buf2); } to_wchar(cmdline, wcmd, ARRAY_SIZE(wcmd)); if (CreateProcessW(NULL, wcmd, NULL, NULL, TRUE, CREATE_NEW_PROCESS_GROUP, (void *) env, full_dir, &si, &pi) != 0) { mg_spawn_stdio_thread(sock, a[1], mg_push_to_stdin); mg_spawn_stdio_thread(sock, b[0], mg_pull_from_stdout); CloseHandle(si.hStdOutput); CloseHandle(si.hStdInput); CloseHandle(pi.hThread); CloseHandle(pi.hProcess); } else { CloseHandle(a[1]); CloseHandle(b[0]); closesocket(sock); } DBG(("CGI command: [%ls] -> %p", wcmd, pi.hProcess)); /* Not closing a[0] and b[1] because we've used DUPLICATE_CLOSE_SOURCE */ (void) envp; return (pi.hProcess != NULL); } #else static int mg_start_process(const char *interp, const char *cmd, const char *env, const char *envp[], const char *dir, sock_t sock) { char 
buf[500]; pid_t pid = fork(); (void) env; if (pid == 0) { /* * In Linux `chdir` declared with `warn_unused_result` attribute * To shutup compiler we have yo use result in some way */ int tmp = chdir(dir); (void) tmp; (void) dup2(sock, 0); (void) dup2(sock, 1); closesocket(sock); /* * After exec, all signal handlers are restored to their default values, * with one exception of SIGCHLD. According to POSIX.1-2001 and Linux's * implementation, SIGCHLD's handler will leave unchanged after exec * if it was set to be ignored. Restore it to default action. */ signal(SIGCHLD, SIG_DFL); if (interp == NULL) { execle(cmd, cmd, (char *) 0, envp); /* (char *) 0 to squash warning */ } else { execle(interp, interp, cmd, (char *) 0, envp); } snprintf(buf, sizeof(buf), "Status: 500\r\n\r\n" "500 Server Error: %s%s%s: %s", interp == NULL ? "" : interp, interp == NULL ? "" : " ", cmd, strerror(errno)); send(1, buf, strlen(buf), 0); _exit(EXIT_FAILURE); /* exec call failed */ } return (pid != 0); } #endif /* _WIN32 */ /* * Append VARIABLE=VALUE\0 string to the buffer, and add a respective * pointer into the vars array. */ static char *mg_addenv(struct mg_cgi_env_block *block, const char *fmt, ...) { int n, space; char *added = block->buf + block->len; va_list ap; /* Calculate how much space is left in the buffer */ space = sizeof(block->buf) - (block->len + 2); if (space > 0) { /* Copy VARIABLE=VALUE\0 string into the free space */ va_start(ap, fmt); n = vsnprintf(added, (size_t) space, fmt, ap); va_end(ap); /* Make sure we do not overflow buffer and the envp array */ if (n > 0 && n + 1 < space && block->nvars < (int) ARRAY_SIZE(block->vars) - 2) { /* Append a pointer to the added string into the envp array */ block->vars[block->nvars++] = added; /* Bump up used length counter. 
Include \0 terminator */ block->len += n + 1; } } return added; } static void mg_addenv2(struct mg_cgi_env_block *blk, const char *name) { const char *s; if ((s = getenv(name)) != NULL) mg_addenv(blk, "%s=%s", name, s); } static void mg_prepare_cgi_environment(struct mg_connection *nc, const char *prog, const struct mg_str *path_info, const struct http_message *hm, const struct mg_serve_http_opts *opts, struct mg_cgi_env_block *blk) { const char *s; struct mg_str *h; char *p; size_t i; char buf[100]; blk->len = blk->nvars = 0; blk->nc = nc; if ((s = getenv("SERVER_NAME")) != NULL) { mg_addenv(blk, "SERVER_NAME=%s", s); } else { mg_sock_to_str(nc->sock, buf, sizeof(buf), 3); mg_addenv(blk, "SERVER_NAME=%s", buf); } mg_addenv(blk, "SERVER_ROOT=%s", opts->document_root); mg_addenv(blk, "DOCUMENT_ROOT=%s", opts->document_root); mg_addenv(blk, "SERVER_SOFTWARE=%s/%s", "Mongoose", MG_VERSION); /* Prepare the environment block */ mg_addenv(blk, "%s", "GATEWAY_INTERFACE=CGI/1.1"); mg_addenv(blk, "%s", "SERVER_PROTOCOL=HTTP/1.1"); mg_addenv(blk, "%s", "REDIRECT_STATUS=200"); /* For PHP */ mg_addenv(blk, "REQUEST_METHOD=%.*s", (int) hm->method.len, hm->method.p); mg_addenv(blk, "REQUEST_URI=%.*s%s%.*s", (int) hm->uri.len, hm->uri.p, hm->query_string.len == 0 ? "" : "?", (int) hm->query_string.len, hm->query_string.p); mg_conn_addr_to_str(nc, buf, sizeof(buf), MG_SOCK_STRINGIFY_REMOTE | MG_SOCK_STRINGIFY_IP); mg_addenv(blk, "REMOTE_ADDR=%s", buf); mg_conn_addr_to_str(nc, buf, sizeof(buf), MG_SOCK_STRINGIFY_PORT); mg_addenv(blk, "SERVER_PORT=%s", buf); s = hm->uri.p + hm->uri.len - path_info->len - 1; if (*s == '/') { const char *base_name = strrchr(prog, DIRSEP); mg_addenv(blk, "SCRIPT_NAME=%.*s/%s", (int) (s - hm->uri.p), hm->uri.p, (base_name != NULL ? 
base_name + 1 : prog)); } else { mg_addenv(blk, "SCRIPT_NAME=%.*s", (int) (s - hm->uri.p + 1), hm->uri.p); } mg_addenv(blk, "SCRIPT_FILENAME=%s", prog); if (path_info != NULL && path_info->len > 0) { mg_addenv(blk, "PATH_INFO=%.*s", (int) path_info->len, path_info->p); /* Not really translated... */ mg_addenv(blk, "PATH_TRANSLATED=%.*s", (int) path_info->len, path_info->p); } #if MG_ENABLE_SSL mg_addenv(blk, "HTTPS=%s", (nc->flags & MG_F_SSL ? "on" : "off")); #else mg_addenv(blk, "HTTPS=off"); #endif if ((h = mg_get_http_header((struct http_message *) hm, "Content-Type")) != NULL) { mg_addenv(blk, "CONTENT_TYPE=%.*s", (int) h->len, h->p); } if (hm->query_string.len > 0) { mg_addenv(blk, "QUERY_STRING=%.*s", (int) hm->query_string.len, hm->query_string.p); } if ((h = mg_get_http_header((struct http_message *) hm, "Content-Length")) != NULL) { mg_addenv(blk, "CONTENT_LENGTH=%.*s", (int) h->len, h->p); } mg_addenv2(blk, "PATH"); mg_addenv2(blk, "TMP"); mg_addenv2(blk, "TEMP"); mg_addenv2(blk, "TMPDIR"); mg_addenv2(blk, "PERLLIB"); mg_addenv2(blk, MG_ENV_EXPORT_TO_CGI); #ifdef _WIN32 mg_addenv2(blk, "COMSPEC"); mg_addenv2(blk, "SYSTEMROOT"); mg_addenv2(blk, "SystemDrive"); mg_addenv2(blk, "ProgramFiles"); mg_addenv2(blk, "ProgramFiles(x86)"); mg_addenv2(blk, "CommonProgramFiles(x86)"); #else mg_addenv2(blk, "LD_LIBRARY_PATH"); #endif /* _WIN32 */ /* Add all headers as HTTP_* variables */ for (i = 0; hm->header_names[i].len > 0; i++) { p = mg_addenv(blk, "HTTP_%.*s=%.*s", (int) hm->header_names[i].len, hm->header_names[i].p, (int) hm->header_values[i].len, hm->header_values[i].p); /* Convert variable name into uppercase, and change - to _ */ for (; *p != '=' && *p != '\0'; p++) { if (*p == '-') *p = '_'; *p = (char) toupper(*(unsigned char *) p); } } blk->vars[blk->nvars++] = NULL; blk->buf[blk->len++] = '\0'; } static void mg_cgi_ev_handler(struct mg_connection *cgi_nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { #if !MG_ENABLE_CALLBACK_USERDATA void *user_data 
= cgi_nc->user_data; #endif struct mg_connection *nc = (struct mg_connection *) user_data; (void) ev_data; if (nc == NULL) { cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY; return; } switch (ev) { case MG_EV_RECV: /* * CGI script does not output reply line, like "HTTP/1.1 CODE XXXXX\n" * It outputs headers, then body. Headers might include "Status" * header, which changes CODE, and it might include "Location" header * which changes CODE to 302. * * Therefore we do not send the output from the CGI script to the user * until all CGI headers are received. * * Here we parse the output from the CGI script, and if all headers has * been received, send appropriate reply line, and forward all * received headers to the client. */ if (nc->flags & MG_F_USER_1) { struct mbuf *io = &cgi_nc->recv_mbuf; int len = mg_http_get_request_len(io->buf, io->len); if (len == 0) break; if (len < 0 || io->len > MG_MAX_HTTP_REQUEST_SIZE) { cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY; mg_http_send_error(nc, 500, "Bad headers"); } else { struct http_message hm; struct mg_str *h; mg_http_parse_headers(io->buf, io->buf + io->len, io->len, &hm); if (mg_get_http_header(&hm, "Location") != NULL) { mg_printf(nc, "%s", "HTTP/1.1 302 Moved\r\n"); } else if ((h = mg_get_http_header(&hm, "Status")) != NULL) { mg_printf(nc, "HTTP/1.1 %.*s\r\n", (int) h->len, h->p); } else { mg_printf(nc, "%s", "HTTP/1.1 200 OK\r\n"); } } nc->flags &= ~MG_F_USER_1; } if (!(nc->flags & MG_F_USER_1)) { mg_forward(cgi_nc, nc); } break; case MG_EV_CLOSE: mg_http_free_proto_data_cgi(&mg_http_get_proto_data(nc)->cgi); nc->flags |= MG_F_SEND_AND_CLOSE; break; } } MG_INTERNAL void mg_handle_cgi(struct mg_connection *nc, const char *prog, const struct mg_str *path_info, const struct http_message *hm, const struct mg_serve_http_opts *opts) { struct mg_cgi_env_block blk; char dir[MAX_PATH_SIZE]; const char *p; sock_t fds[2]; DBG(("%p [%s]", nc, prog)); mg_prepare_cgi_environment(nc, prog, path_info, hm, opts, &blk); /* * CGI must be executed 
in its own directory. 'dir' must point to the * directory containing executable program, 'p' must point to the * executable program name relative to 'dir'. */ if ((p = strrchr(prog, DIRSEP)) == NULL) { snprintf(dir, sizeof(dir), "%s", "."); } else { snprintf(dir, sizeof(dir), "%.*s", (int) (p - prog), prog); prog = p + 1; } /* * Try to create socketpair in a loop until success. mg_socketpair() * can be interrupted by a signal and fail. * TODO(lsm): use sigaction to restart interrupted syscall */ do { mg_socketpair(fds, SOCK_STREAM); } while (fds[0] == INVALID_SOCKET); if (mg_start_process(opts->cgi_interpreter, prog, blk.buf, blk.vars, dir, fds[1]) != 0) { size_t n = nc->recv_mbuf.len - (hm->message.len - hm->body.len); struct mg_connection *cgi_nc = mg_add_sock(nc->mgr, fds[0], mg_cgi_ev_handler MG_UD_ARG(nc)); struct mg_http_proto_data *cgi_pd = mg_http_get_proto_data(nc); cgi_pd->cgi.cgi_nc = cgi_nc; #if !MG_ENABLE_CALLBACK_USERDATA cgi_pd->cgi.cgi_nc->user_data = nc; #endif nc->flags |= MG_F_USER_1; /* Push POST data to the CGI */ if (n > 0 && n < nc->recv_mbuf.len) { mg_send(cgi_pd->cgi.cgi_nc, hm->body.p, n); } mbuf_remove(&nc->recv_mbuf, nc->recv_mbuf.len); } else { closesocket(fds[0]); mg_http_send_error(nc, 500, "CGI failure"); } #ifndef _WIN32 closesocket(fds[1]); /* On Windows, CGI stdio thread closes that socket */ #endif } MG_INTERNAL void mg_http_free_proto_data_cgi(struct mg_http_proto_data_cgi *d) { if (d != NULL) { if (d->cgi_nc != NULL) d->cgi_nc->flags |= MG_F_CLOSE_IMMEDIATELY; memset(d, 0, sizeof(struct mg_http_proto_data_cgi)); } } #endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_CGI */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/http_ssi.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_HTTP && MG_ENABLE_HTTP_SSI && MG_ENABLE_FILESYSTEM static void mg_send_ssi_file(struct mg_connection *nc, struct http_message *hm, const char *path, FILE *fp, int include_level, const struct mg_serve_http_opts 
*opts); static void mg_send_file_data(struct mg_connection *nc, FILE *fp) { char buf[BUFSIZ]; size_t n; while ((n = mg_fread(buf, 1, sizeof(buf), fp)) > 0) { mg_send(nc, buf, n); } } static void mg_do_ssi_include(struct mg_connection *nc, struct http_message *hm, const char *ssi, char *tag, int include_level, const struct mg_serve_http_opts *opts) { char file_name[BUFSIZ], path[MAX_PATH_SIZE], *p; FILE *fp; /* * sscanf() is safe here, since send_ssi_file() also uses buffer * of size MG_BUF_LEN to get the tag. So strlen(tag) is always < MG_BUF_LEN. */ if (sscanf(tag, " virtual=\"%[^\"]\"", file_name) == 1) { /* File name is relative to the webserver root */ snprintf(path, sizeof(path), "%s/%s", opts->document_root, file_name); } else if (sscanf(tag, " abspath=\"%[^\"]\"", file_name) == 1) { /* * File name is relative to the webserver working directory * or it is absolute system path */ snprintf(path, sizeof(path), "%s", file_name); } else if (sscanf(tag, " file=\"%[^\"]\"", file_name) == 1 || sscanf(tag, " \"%[^\"]\"", file_name) == 1) { /* File name is relative to the currect document */ snprintf(path, sizeof(path), "%s", ssi); if ((p = strrchr(path, DIRSEP)) != NULL) { p[1] = '\0'; } snprintf(path + strlen(path), sizeof(path) - strlen(path), "%s", file_name); } else { mg_printf(nc, "Bad SSI #include: [%s]", tag); return; } if ((fp = mg_fopen(path, "rb")) == NULL) { mg_printf(nc, "SSI include error: mg_fopen(%s): %s", path, strerror(mg_get_errno())); } else { mg_set_close_on_exec((sock_t) fileno(fp)); if (mg_match_prefix(opts->ssi_pattern, strlen(opts->ssi_pattern), path) > 0) { mg_send_ssi_file(nc, hm, path, fp, include_level + 1, opts); } else { mg_send_file_data(nc, fp); } fclose(fp); } } #if MG_ENABLE_HTTP_SSI_EXEC static void do_ssi_exec(struct mg_connection *nc, char *tag) { char cmd[BUFSIZ]; FILE *fp; if (sscanf(tag, " \"%[^\"]\"", cmd) != 1) { mg_printf(nc, "Bad SSI #exec: [%s]", tag); } else if ((fp = popen(cmd, "r")) == NULL) { mg_printf(nc, "Cannot SSI 
#exec: [%s]: %s", cmd, strerror(mg_get_errno())); } else { mg_send_file_data(nc, fp); pclose(fp); } } #endif /* MG_ENABLE_HTTP_SSI_EXEC */ /* * SSI directive has the following format: * <!--#directive parameter=value parameter=value --> */ static void mg_send_ssi_file(struct mg_connection *nc, struct http_message *hm, const char *path, FILE *fp, int include_level, const struct mg_serve_http_opts *opts) { static const struct mg_str btag = MG_MK_STR("<!--#"); static const struct mg_str d_include = MG_MK_STR("include"); static const struct mg_str d_call = MG_MK_STR("call"); #if MG_ENABLE_HTTP_SSI_EXEC static const struct mg_str d_exec = MG_MK_STR("exec"); #endif char buf[BUFSIZ], *p = buf + btag.len; /* p points to SSI directive */ int ch, len, in_ssi_tag; if (include_level > 10) { mg_printf(nc, "SSI #include level is too deep (%s)", path); return; } in_ssi_tag = len = 0; while ((ch = fgetc(fp)) != EOF) { if (in_ssi_tag && ch == '>' && buf[len - 1] == '-' && buf[len - 2] == '-') { size_t i = len - 2; in_ssi_tag = 0; /* Trim closing --> */ buf[i--] = '\0'; while (i > 0 && buf[i] == ' ') { buf[i--] = '\0'; } /* Handle known SSI directives */ if (strncmp(p, d_include.p, d_include.len) == 0) { mg_do_ssi_include(nc, hm, path, p + d_include.len + 1, include_level, opts); } else if (strncmp(p, d_call.p, d_call.len) == 0) { struct mg_ssi_call_ctx cctx; memset(&cctx, 0, sizeof(cctx)); cctx.req = hm; cctx.file = mg_mk_str(path); cctx.arg = mg_mk_str(p + d_call.len + 1); mg_call(nc, NULL, nc->user_data, MG_EV_SSI_CALL, (void *) cctx.arg.p); /* NUL added above */ mg_call(nc, NULL, nc->user_data, MG_EV_SSI_CALL_CTX, &cctx); #if MG_ENABLE_HTTP_SSI_EXEC } else if (strncmp(p, d_exec.p, d_exec.len) == 0) { do_ssi_exec(nc, p + d_exec.len + 1); #endif } else { /* Silently ignore unknown SSI directive. 
*/
        }
        len = 0;
      } else if (ch == '<') {
        /* Possible start of a tag: flush any buffered plain data first. */
        in_ssi_tag = 1;
        if (len > 0) {
          mg_send(nc, buf, (size_t) len);
        }
        len = 0;
        buf[len++] = ch & 0xff;
      } else if (in_ssi_tag) {
        /* Once btag.len bytes are collected, decide if this is "<!--#". */
        if (len == (int) btag.len && strncmp(buf, btag.p, btag.len) != 0) {
          /* Not an SSI tag */
          in_ssi_tag = 0;
        } else if (len == (int) sizeof(buf) - 2) {
          /* Tag never closed within the buffer: report and restart. */
          mg_printf(nc, "%s: SSI tag is too large", path);
          len = 0;
        }
        buf[len++] = ch & 0xff;
      } else {
        buf[len++] = ch & 0xff;
        if (len == (int) sizeof(buf)) {
          /* Buffer full of plain (non-tag) data: flush it. */
          mg_send(nc, buf, (size_t) len);
          len = 0;
        }
      }
    }

    /* Send the rest of buffered data */
    if (len > 0) {
      mg_send(nc, buf, (size_t) len);
    }
}

/*
 * Serve an SSI document: send the response headers, then stream the file
 * through the SSI processor (mg_send_ssi_file) starting at include level 0.
 * Replies 404 if the file cannot be opened.
 */
MG_INTERNAL void mg_handle_ssi_request(struct mg_connection *nc,
                                       struct http_message *hm,
                                       const char *path,
                                       const struct mg_serve_http_opts *opts) {
  FILE *fp;
  struct mg_str mime_type;
  DBG(("%p %s", nc, path));

  if ((fp = mg_fopen(path, "rb")) == NULL) {
    mg_http_send_error(nc, 404, NULL);
  } else {
    mg_set_close_on_exec((sock_t) fileno(fp));

    mime_type = mg_get_mime_type(path, "text/plain", opts);
    mg_send_response_line(nc, 200, opts->extra_headers);
    mg_printf(nc,
              "Content-Type: %.*s\r\n"
              "Connection: close\r\n\r\n",
              (int) mime_type.len, mime_type.p);
    mg_send_ssi_file(nc, hm, path, fp, 0, opts);
    fclose(fp);
    nc->flags |= MG_F_SEND_AND_CLOSE;
  }
}

#endif /* MG_ENABLE_HTTP_SSI && MG_ENABLE_HTTP && MG_ENABLE_FILESYSTEM */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/http_webdav.c"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#if MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBDAV

/* Return 1 if the request method is one of the supported WebDAV methods. */
MG_INTERNAL int mg_is_dav_request(const struct mg_str *s) {
  static const char *methods[] = {
    "PUT", "DELETE", "MKCOL", "PROPFIND", "MOVE"
#if MG_ENABLE_FAKE_DAVLOCK
    ,
    "LOCK", "UNLOCK"
#endif
  };
  size_t i;

  for (i = 0; i < ARRAY_SIZE(methods); i++) {
    if (mg_vcmp(s, methods[i]) == 0) {
      return 1;
    }
  }

  return 0;
}

/* Portable mkdir wrapper: Windows _mkdir() does not take a mode argument. */
static int mg_mkdir(const char *path, uint32_t mode) {
#ifndef _WIN32
  return mkdir(path, mode);
#else
  (void) mode;
  return _mkdir(path);
#endif
}

/* Emit one PROPFIND <d:response> entry for the resource `name`. */
static void mg_print_props(struct
mg_connection *nc, const char *name, cs_stat_t *stp) { char mtime[64], buf[MAX_PATH_SIZE * 3]; time_t t = stp->st_mtime; /* store in local variable for NDK compile */ mg_gmt_time_string(mtime, sizeof(mtime), &t); mg_url_encode(name, strlen(name), buf, sizeof(buf)); mg_printf(nc, "<d:response>" "<d:href>%s</d:href>" "<d:propstat>" "<d:prop>" "<d:resourcetype>%s</d:resourcetype>" "<d:getcontentlength>%" INT64_FMT "</d:getcontentlength>" "<d:getlastmodified>%s</d:getlastmodified>" "</d:prop>" "<d:status>HTTP/1.1 200 OK</d:status>" "</d:propstat>" "</d:response>\n", buf, S_ISDIR(stp->st_mode) ? "<d:collection/>" : "", (int64_t) stp->st_size, mtime); } MG_INTERNAL void mg_handle_propfind(struct mg_connection *nc, const char *path, cs_stat_t *stp, struct http_message *hm, struct mg_serve_http_opts *opts) { static const char header[] = "HTTP/1.1 207 Multi-Status\r\n" "Connection: close\r\n" "Content-Type: text/xml; charset=utf-8\r\n\r\n" "<?xml version=\"1.0\" encoding=\"utf-8\"?>" "<d:multistatus xmlns:d='DAV:'>\n"; static const char footer[] = "</d:multistatus>\n"; const struct mg_str *depth = mg_get_http_header(hm, "Depth"); /* Print properties for the requested resource itself */ if (S_ISDIR(stp->st_mode) && strcmp(opts->enable_directory_listing, "yes") != 0) { mg_printf(nc, "%s", "HTTP/1.1 403 Directory Listing Denied\r\n\r\n"); } else { char uri[MAX_PATH_SIZE]; mg_send(nc, header, sizeof(header) - 1); snprintf(uri, sizeof(uri), "%.*s", (int) hm->uri.len, hm->uri.p); mg_print_props(nc, uri, stp); if (S_ISDIR(stp->st_mode) && (depth == NULL || mg_vcmp(depth, "0") != 0)) { mg_scan_directory(nc, path, opts, mg_print_props); } mg_send(nc, footer, sizeof(footer) - 1); nc->flags |= MG_F_SEND_AND_CLOSE; } } #if MG_ENABLE_FAKE_DAVLOCK /* * Windows explorer (probably there are another WebDav clients like it) * requires LOCK support in webdav. W/out this, it still works, but fails * to save file: shows error message and offers "Save As". 
* "Save as" works, but this message is very annoying. * This is fake lock, which doesn't lock something, just returns LOCK token, * UNLOCK always answers "OK". * With this fake LOCK Windows Explorer looks happy and saves file. * NOTE: that is not DAV LOCK imlementation, it is just a way to shut up * Windows native DAV client. This is why FAKE LOCK is not enabed by default */ MG_INTERNAL void mg_handle_lock(struct mg_connection *nc, const char *path) { static const char *reply = "HTTP/1.1 207 Multi-Status\r\n" "Connection: close\r\n" "Content-Type: text/xml; charset=utf-8\r\n\r\n" "<?xml version=\"1.0\" encoding=\"utf-8\"?>" "<d:multistatus xmlns:d='DAV:'>\n" "<D:lockdiscovery>\n" "<D:activelock>\n" "<D:locktoken>\n" "<D:href>\n" "opaquelocktoken:%s%u" "</D:href>" "</D:locktoken>" "</D:activelock>\n" "</D:lockdiscovery>" "</d:multistatus>\n"; mg_printf(nc, reply, path, (unsigned int) mg_time()); nc->flags |= MG_F_SEND_AND_CLOSE; } #endif MG_INTERNAL void mg_handle_mkcol(struct mg_connection *nc, const char *path, struct http_message *hm) { int status_code = 500; if (hm->body.len != (size_t) ~0 && hm->body.len > 0) { status_code = 415; } else if (!mg_mkdir(path, 0755)) { status_code = 201; } else if (errno == EEXIST) { status_code = 405; } else if (errno == EACCES) { status_code = 403; } else if (errno == ENOENT) { status_code = 409; } else { status_code = 500; } mg_http_send_error(nc, status_code, NULL); } static int mg_remove_directory(const struct mg_serve_http_opts *opts, const char *dir) { char path[MAX_PATH_SIZE]; struct dirent *dp; cs_stat_t st; DIR *dirp; if ((dirp = opendir(dir)) == NULL) return 0; while ((dp = readdir(dirp)) != NULL) { if (mg_is_file_hidden((const char *) dp->d_name, opts, 1)) { continue; } snprintf(path, sizeof(path), "%s%c%s", dir, '/', dp->d_name); mg_stat(path, &st); if (S_ISDIR(st.st_mode)) { mg_remove_directory(opts, path); } else { remove(path); } } closedir(dirp); rmdir(dir); return 1; } MG_INTERNAL void mg_handle_move(struct 
mg_connection *c, const struct mg_serve_http_opts *opts, const char *path, struct http_message *hm) { const struct mg_str *dest = mg_get_http_header(hm, "Destination"); if (dest == NULL) { mg_http_send_error(c, 411, NULL); } else { const char *p = (char *) memchr(dest->p, '/', dest->len); if (p != NULL && p[1] == '/' && (p = (char *) memchr(p + 2, '/', dest->p + dest->len - p)) != NULL) { char buf[MAX_PATH_SIZE]; snprintf(buf, sizeof(buf), "%s%.*s", opts->dav_document_root, (int) (dest->p + dest->len - p), p); if (rename(path, buf) == 0) { mg_http_send_error(c, 200, NULL); } else { mg_http_send_error(c, 418, NULL); } } else { mg_http_send_error(c, 500, NULL); } } } MG_INTERNAL void mg_handle_delete(struct mg_connection *nc, const struct mg_serve_http_opts *opts, const char *path) { cs_stat_t st; if (mg_stat(path, &st) != 0) { mg_http_send_error(nc, 404, NULL); } else if (S_ISDIR(st.st_mode)) { mg_remove_directory(opts, path); mg_http_send_error(nc, 204, NULL); } else if (remove(path) == 0) { mg_http_send_error(nc, 204, NULL); } else { mg_http_send_error(nc, 423, NULL); } } /* Return -1 on error, 1 on success. */ static int mg_create_itermediate_directories(const char *path) { const char *s; /* Create intermediate directories if they do not exist */ for (s = path + 1; *s != '\0'; s++) { if (*s == '/') { char buf[MAX_PATH_SIZE]; cs_stat_t st; snprintf(buf, sizeof(buf), "%.*s", (int) (s - path), path); buf[sizeof(buf) - 1] = '\0'; if (mg_stat(buf, &st) != 0 && mg_mkdir(buf, 0755) != 0) { return -1; } } } return 1; } MG_INTERNAL void mg_handle_put(struct mg_connection *nc, const char *path, struct http_message *hm) { struct mg_http_proto_data *pd = mg_http_get_proto_data(nc); cs_stat_t st; const struct mg_str *cl_hdr = mg_get_http_header(hm, "Content-Length"); int rc, status_code = mg_stat(path, &st) == 0 ? 
200 : 201; mg_http_free_proto_data_file(&pd->file); if ((rc = mg_create_itermediate_directories(path)) == 0) { mg_printf(nc, "HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n", status_code); } else if (rc == -1) { mg_http_send_error(nc, 500, NULL); } else if (cl_hdr == NULL) { mg_http_send_error(nc, 411, NULL); } else if ((pd->file.fp = mg_fopen(path, "w+b")) == NULL) { mg_http_send_error(nc, 500, NULL); } else { const struct mg_str *range_hdr = mg_get_http_header(hm, "Content-Range"); int64_t r1 = 0, r2 = 0; pd->file.type = DATA_PUT; mg_set_close_on_exec((sock_t) fileno(pd->file.fp)); pd->file.cl = to64(cl_hdr->p); if (range_hdr != NULL && mg_http_parse_range_header(range_hdr, &r1, &r2) > 0) { status_code = 206; fseeko(pd->file.fp, r1, SEEK_SET); pd->file.cl = r2 > r1 ? r2 - r1 + 1 : pd->file.cl - r1; } mg_printf(nc, "HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n", status_code); /* Remove HTTP request from the mbuf, leave only payload */ mbuf_remove(&nc->recv_mbuf, hm->message.len - hm->body.len); mg_http_transfer_file_data(nc); } } #endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBDAV */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/http_websocket.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBSOCKET #ifndef MG_WEBSOCKET_PING_INTERVAL_SECONDS #define MG_WEBSOCKET_PING_INTERVAL_SECONDS 5 #endif #define MG_WS_NO_HOST_HEADER_MAGIC ((char *) 0x1) static int mg_is_ws_fragment(unsigned char flags) { return (flags & 0x80) == 0 || (flags & 0x0f) == 0; } static int mg_is_ws_first_fragment(unsigned char flags) { return (flags & 0x80) == 0 && (flags & 0x0f) != 0; } static void mg_handle_incoming_websocket_frame(struct mg_connection *nc, struct websocket_message *wsm) { if (wsm->flags & 0x8) { mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_CONTROL_FRAME, wsm); } else { mg_call(nc, nc->handler, nc->user_data, MG_EV_WEBSOCKET_FRAME, wsm); } } static int mg_deliver_websocket_data(struct mg_connection *nc) 
{ /* Using unsigned char *, cause of integer arithmetic below */ uint64_t i, data_len = 0, frame_len = 0, buf_len = nc->recv_mbuf.len, len, mask_len = 0, header_len = 0; unsigned char *p = (unsigned char *) nc->recv_mbuf.buf, *buf = p, *e = p + buf_len; unsigned *sizep = (unsigned *) &p[1]; /* Size ptr for defragmented frames */ int ok, reass = buf_len > 0 && mg_is_ws_fragment(p[0]) && !(nc->flags & MG_F_WEBSOCKET_NO_DEFRAG); /* If that's a continuation frame that must be reassembled, handle it */ if (reass && !mg_is_ws_first_fragment(p[0]) && buf_len >= 1 + sizeof(*sizep) && buf_len >= 1 + sizeof(*sizep) + *sizep) { buf += 1 + sizeof(*sizep) + *sizep; buf_len -= 1 + sizeof(*sizep) + *sizep; } if (buf_len >= 2) { len = buf[1] & 127; mask_len = buf[1] & 128 ? 4 : 0; if (len < 126 && buf_len >= mask_len) { data_len = len; header_len = 2 + mask_len; } else if (len == 126 && buf_len >= 4 + mask_len) { header_len = 4 + mask_len; data_len = ntohs(*(uint16_t *) &buf[2]); } else if (buf_len >= 10 + mask_len) { header_len = 10 + mask_len; data_len = (((uint64_t) ntohl(*(uint32_t *) &buf[2])) << 32) + ntohl(*(uint32_t *) &buf[6]); } } frame_len = header_len + data_len; ok = frame_len > 0 && frame_len <= buf_len; if (ok) { struct websocket_message wsm; wsm.size = (size_t) data_len; wsm.data = buf + header_len; wsm.flags = buf[0]; /* Apply mask if necessary */ if (mask_len > 0) { for (i = 0; i < data_len; i++) { buf[i + header_len] ^= (buf + header_len - mask_len)[i % 4]; } } if (reass) { /* On first fragmented frame, nullify size */ if (mg_is_ws_first_fragment(wsm.flags)) { mbuf_resize(&nc->recv_mbuf, nc->recv_mbuf.size + sizeof(*sizep)); p[0] &= ~0x0f; /* Next frames will be treated as continuation */ buf = p + 1 + sizeof(*sizep); *sizep = 0; /* TODO(lsm): fix. 
this can stomp over frame data */ } /* Append this frame to the reassembled buffer */ memmove(buf, wsm.data, e - wsm.data); (*sizep) += wsm.size; nc->recv_mbuf.len -= wsm.data - buf; /* On last fragmented frame - call user handler and remove data */ if (wsm.flags & 0x80) { wsm.data = p + 1 + sizeof(*sizep); wsm.size = *sizep; mg_handle_incoming_websocket_frame(nc, &wsm); mbuf_remove(&nc->recv_mbuf, 1 + sizeof(*sizep) + *sizep); } } else { /* TODO(lsm): properly handle OOB control frames during defragmentation */ mg_handle_incoming_websocket_frame(nc, &wsm); mbuf_remove(&nc->recv_mbuf, (size_t) frame_len); /* Cleanup frame */ } /* If the frame is not reassembled - client closes and close too */ if (!reass && (buf[0] & 0x0f) == WEBSOCKET_OP_CLOSE) { nc->flags |= MG_F_SEND_AND_CLOSE; } } return ok; } struct ws_mask_ctx { size_t pos; /* zero means unmasked */ uint32_t mask; }; static uint32_t mg_ws_random_mask(void) { uint32_t mask; /* * The spec requires WS client to generate hard to * guess mask keys. From RFC6455, Section 5.3: * * The unpredictability of the masking key is essential to prevent * authors of malicious applications from selecting the bytes that appear on * the wire. * * Hence this feature is essential when the actual end user of this API * is untrusted code that wouldn't have access to a lower level net API * anyway (e.g. web browsers). Hence this feature is low prio for most * mongoose use cases and thus can be disabled, e.g. when porting to a platform * that lacks rand(). */ #if MG_DISABLE_WS_RANDOM_MASK mask = 0xefbeadde; /* generated with a random number generator, I swear */ #else if (sizeof(long) >= 4) { mask = (uint32_t) rand(); } else if (sizeof(long) == 2) { mask = (uint32_t) rand() << 16 | (uint32_t) rand(); } #endif return mask; } static void mg_send_ws_header(struct mg_connection *nc, int op, size_t len, struct ws_mask_ctx *ctx) { int header_len; unsigned char header[10]; header[0] = (op & WEBSOCKET_DONT_FIN ? 
0x0 : 0x80) + (op & 0x0f); if (len < 126) { header[1] = (unsigned char) len; header_len = 2; } else if (len < 65535) { uint16_t tmp = htons((uint16_t) len); header[1] = 126; memcpy(&header[2], &tmp, sizeof(tmp)); header_len = 4; } else { uint32_t tmp; header[1] = 127; tmp = htonl((uint32_t)((uint64_t) len >> 32)); memcpy(&header[2], &tmp, sizeof(tmp)); tmp = htonl((uint32_t)(len & 0xffffffff)); memcpy(&header[6], &tmp, sizeof(tmp)); header_len = 10; } /* client connections enable masking */ if (nc->listener == NULL) { header[1] |= 1 << 7; /* set masking flag */ mg_send(nc, header, header_len); ctx->mask = mg_ws_random_mask(); mg_send(nc, &ctx->mask, sizeof(ctx->mask)); ctx->pos = nc->send_mbuf.len; } else { mg_send(nc, header, header_len); ctx->pos = 0; } } static void mg_ws_mask_frame(struct mbuf *mbuf, struct ws_mask_ctx *ctx) { size_t i; if (ctx->pos == 0) return; for (i = 0; i < (mbuf->len - ctx->pos); i++) { mbuf->buf[ctx->pos + i] ^= ((char *) &ctx->mask)[i % 4]; } } void mg_send_websocket_frame(struct mg_connection *nc, int op, const void *data, size_t len) { struct ws_mask_ctx ctx; DBG(("%p %d %d", nc, op, (int) len)); mg_send_ws_header(nc, op, len, &ctx); mg_send(nc, data, len); mg_ws_mask_frame(&nc->send_mbuf, &ctx); if (op == WEBSOCKET_OP_CLOSE) { nc->flags |= MG_F_SEND_AND_CLOSE; } } void mg_send_websocket_framev(struct mg_connection *nc, int op, const struct mg_str *strv, int strvcnt) { struct ws_mask_ctx ctx; int i; int len = 0; for (i = 0; i < strvcnt; i++) { len += strv[i].len; } mg_send_ws_header(nc, op, len, &ctx); for (i = 0; i < strvcnt; i++) { mg_send(nc, strv[i].p, strv[i].len); } mg_ws_mask_frame(&nc->send_mbuf, &ctx); if (op == WEBSOCKET_OP_CLOSE) { nc->flags |= MG_F_SEND_AND_CLOSE; } } void mg_printf_websocket_frame(struct mg_connection *nc, int op, const char *fmt, ...) 
{ char mem[MG_VPRINTF_BUFFER_SIZE], *buf = mem; va_list ap; int len; va_start(ap, fmt); if ((len = mg_avprintf(&buf, sizeof(mem), fmt, ap)) > 0) { mg_send_websocket_frame(nc, op, buf, len); } va_end(ap); if (buf != mem && buf != NULL) { MG_FREE(buf); } } MG_INTERNAL void mg_ws_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { mg_call(nc, nc->handler, nc->user_data, ev, ev_data); switch (ev) { case MG_EV_RECV: do { } while (mg_deliver_websocket_data(nc)); break; case MG_EV_POLL: /* Ping idle websocket connections */ { time_t now = *(time_t *) ev_data; if (nc->flags & MG_F_IS_WEBSOCKET && now > nc->last_io_time + MG_WEBSOCKET_PING_INTERVAL_SECONDS) { mg_send_websocket_frame(nc, WEBSOCKET_OP_PING, "", 0); } } break; default: break; } #if MG_ENABLE_CALLBACK_USERDATA (void) user_data; #endif } #ifndef MG_EXT_SHA1 static void mg_hash_sha1_v(size_t num_msgs, const uint8_t *msgs[], const size_t *msg_lens, uint8_t *digest) { size_t i; cs_sha1_ctx sha_ctx; cs_sha1_init(&sha_ctx); for (i = 0; i < num_msgs; i++) { cs_sha1_update(&sha_ctx, msgs[i], msg_lens[i]); } cs_sha1_final(digest, &sha_ctx); } #else extern void mg_hash_sha1_v(size_t num_msgs, const uint8_t *msgs[], const size_t *msg_lens, uint8_t *digest); #endif MG_INTERNAL void mg_ws_handshake(struct mg_connection *nc, const struct mg_str *key) { static const char *magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; const uint8_t *msgs[2] = {(const uint8_t *) key->p, (const uint8_t *) magic}; const size_t msg_lens[2] = {key->len, 36}; unsigned char sha[20]; char b64_sha[30]; mg_hash_sha1_v(2, msgs, msg_lens, sha); mg_base64_encode(sha, sizeof(sha), b64_sha); mg_printf(nc, "%s%s%s", "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" "Sec-WebSocket-Accept: ", b64_sha, "\r\n\r\n"); DBG(("%p %.*s %s", nc, (int) key->len, key->p, b64_sha)); } void mg_send_websocket_handshake2(struct mg_connection *nc, const char *path, const char *host, const char 
*protocol, const char *extra_headers) {
  mg_send_websocket_handshake3(nc, path, host, protocol, extra_headers, NULL,
                               NULL);
}

/*
 * Sends the WebSocket client handshake request, optionally with HTTP Basic
 * auth (user/pass). A random 16-byte nonce is base64-encoded into the
 * Sec-WebSocket-Key header.
 */
void mg_send_websocket_handshake3(struct mg_connection *nc, const char *path,
                                  const char *host, const char *protocol,
                                  const char *extra_headers, const char *user,
                                  const char *pass) {
  struct mbuf auth;
  char key[25]; /* base64 of 16 random bytes: 24 chars + NUL */
  uint32_t nonce[4];
  nonce[0] = mg_ws_random_mask();
  nonce[1] = mg_ws_random_mask();
  nonce[2] = mg_ws_random_mask();
  nonce[3] = mg_ws_random_mask();
  mg_base64_encode((unsigned char *) &nonce, sizeof(nonce), key);

  mbuf_init(&auth, 0);
  if (user != NULL) {
    mg_basic_auth_header(user, pass, &auth);
  }

  /*
   * NOTE: the (auth.buf == NULL ? "" : auth.buf) is because cc3200 libc is
   * broken: it doesn't like zero length to be passed to %.*s
   * i.e. sprintf("f%.*so", (int)0, NULL), yields `f\0o`.
   * because it handles NULL specially (and incorrectly).
   */
  mg_printf(nc,
            "GET %s HTTP/1.1\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            "%.*s"
            "Sec-WebSocket-Version: 13\r\n"
            "Sec-WebSocket-Key: %s\r\n",
            path, (int) auth.len, (auth.buf == NULL ? "" : auth.buf), key);

  /* TODO(mkm): take default hostname from http proto data if host == NULL */
  if (host != MG_WS_NO_HOST_HEADER_MAGIC) {
    mg_printf(nc, "Host: %s\r\n", host);
  }
  if (protocol != NULL) {
    mg_printf(nc, "Sec-WebSocket-Protocol: %s\r\n", protocol);
  }
  if (extra_headers != NULL) {
    mg_printf(nc, "%s", extra_headers);
  }
  mg_printf(nc, "\r\n");

  mbuf_free(&auth);
}

/* Convenience wrapper: handshake with no Host override, protocol or auth. */
void mg_send_websocket_handshake(struct mg_connection *nc, const char *path,
                                 const char *extra_headers) {
  mg_send_websocket_handshake2(nc, path, MG_WS_NO_HOST_HEADER_MAGIC, NULL,
                               extra_headers);
}

/* Creates an outbound WebSocket connection from a ws:// or wss:// URL and
 * sends the client handshake. Returns NULL on connect failure. */
struct mg_connection *mg_connect_ws_opt(
    struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
    struct mg_connect_opts opts, const char *url, const char *protocol,
    const char *extra_headers) {
  char *user = NULL, *pass = NULL, *addr = NULL;
  const char *path = NULL;
  struct mg_connection *nc =
      mg_connect_http_base(mgr, MG_CB(ev_handler, user_data), opts, "ws://",
                           "wss://", url, &path, &user, &pass, &addr);

  if (nc != NULL) {
    mg_send_websocket_handshake3(nc, path, addr, protocol, extra_headers, user,
                                 pass);
  }

  /* mg_connect_http_base allocated these; ownership stays here. */
  MG_FREE(addr);
  MG_FREE(user);
  MG_FREE(pass);
  return nc;
}

/* Same as mg_connect_ws_opt() with default connect options. */
struct mg_connection *mg_connect_ws(
    struct mg_mgr *mgr, MG_CB(mg_event_handler_t ev_handler, void *user_data),
    const char *url, const char *protocol, const char *extra_headers) {
  struct mg_connect_opts opts;
  memset(&opts, 0, sizeof(opts));
  return mg_connect_ws_opt(mgr, MG_CB(ev_handler, user_data), opts, url,
                           protocol, extra_headers);
}
#endif /* MG_ENABLE_HTTP && MG_ENABLE_HTTP_WEBSOCKET */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/util.c"
#endif
/*
 * Copyright (c) 2014 Cesanta Software Limited
 * All rights reserved
 */

/* Amalgamated: #include "common/base64.h" */
/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */

/* For platforms with limited libc */
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/* Stores in *v the token before the first delimiter in [s, end); returns a
 * pointer past any following run of delimiters. */
const char *mg_skip(const char *s, const char *end, const char *delims,
                    struct mg_str *v) {
  v->p = s;
  while (s < end && strchr(delims, *(unsigned char *) s) == NULL) s++;
  v->len = s - v->p;
  while (s < end && strchr(delims, *(unsigned char *) s) != NULL) s++;
  return s;
}

/* tolower() wrapper safe for chars that would be negative as plain char. */
static int lowercase(const char *s) {
  return tolower(*(const unsigned char *) s);
}

#if MG_ENABLE_FILESYSTEM && !defined(MG_USER_FILE_FUNCTIONS)
/* stat() wrapper; uses wide-char APIs on Windows. */
int mg_stat(const char *path, cs_stat_t *st) {
#ifdef _WIN32
  wchar_t wpath[MAX_PATH_SIZE];
  to_wchar(path, wpath, ARRAY_SIZE(wpath));
  DBG(("[%ls] -> %d", wpath, _wstati64(wpath, st)));
  return _wstati64(wpath, st);
#else
  return stat(path, st);
#endif
}

/* fopen() wrapper; uses wide-char APIs on Windows. */
FILE *mg_fopen(const char *path, const char *mode) {
#ifdef _WIN32
  wchar_t wpath[MAX_PATH_SIZE], wmode[10];
  to_wchar(path, wpath, ARRAY_SIZE(wpath));
  to_wchar(mode, wmode, ARRAY_SIZE(wmode));
  return _wfopen(wpath, wmode);
#else
  return fopen(path, mode);
#endif
}

/* open() wrapper; uses wide-char APIs on Windows (but not WinCE). */
int mg_open(const char *path, int flag, int mode) { /* LCOV_EXCL_LINE */
#if defined(_WIN32) && !defined(WINCE)
  wchar_t wpath[MAX_PATH_SIZE];
  to_wchar(path, wpath, ARRAY_SIZE(wpath));
  return _wopen(wpath, flag, mode);
#else
  return open(path, flag, mode); /* LCOV_EXCL_LINE */
#endif
}

/* fread() passthrough (replaceable via MG_USER_FILE_FUNCTIONS). */
size_t mg_fread(void *ptr, size_t size, size_t count, FILE *f) {
  return fread(ptr, size, count, f);
}

/* fwrite() passthrough (replaceable via MG_USER_FILE_FUNCTIONS). */
size_t mg_fwrite(const void *ptr, size_t size, size_t count, FILE *f) {
  return fwrite(ptr, size, count, f);
}
#endif

/* base64-encodes src_len bytes of src into NUL-terminated dst. */
void mg_base64_encode(const unsigned char *src, int src_len, char *dst) {
  cs_base64_encode(src, src_len, dst);
}

/* base64-decodes s into dst; returns the number of input bytes consumed. */
int mg_base64_decode(const unsigned char *s, int len, char *dst) {
  return cs_base64_decode(s, len, dst, NULL);
}

#if MG_ENABLE_THREADS
/* Starts a detached thread running f(p); returns an opaque thread handle. */
void *mg_start_thread(void *(*f)(void *), void *p) {
#ifdef WINCE
  return (void *) CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) f, p, 0, NULL);
#elif defined(_WIN32)
  return (void *) _beginthread((void(__cdecl *) (void *) ) f, 0, p);
#else
  pthread_t thread_id = (pthread_t) 0;
  pthread_attr_t attr;
  (void) pthread_attr_init(&attr);
  (void) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
#if defined(MG_STACK_SIZE) && MG_STACK_SIZE > 1
  (void) pthread_attr_setstacksize(&attr, MG_STACK_SIZE);
#endif
  pthread_create(&thread_id, &attr, f, p);
  pthread_attr_destroy(&attr);
  return (void *) thread_id;
#endif
}
#endif /* MG_ENABLE_THREADS */

/* Set close-on-exec bit for a given socket. */
void mg_set_close_on_exec(sock_t sock) {
#if defined(_WIN32) && !defined(WINCE)
  (void) SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0);
#elif defined(__unix__)
  fcntl(sock, F_SETFD, FD_CLOEXEC);
#else
  (void) sock;
#endif
}

/*
 * Renders a socket address as text into buf (len bytes).
 * flags: MG_SOCK_STRINGIFY_IP and/or MG_SOCK_STRINGIFY_PORT select the
 * parts; IPv6 addresses are bracketed when a port is also requested.
 * On failure buf is set to the empty string.
 */
void mg_sock_addr_to_str(const union socket_address *sa, char *buf, size_t len,
                         int flags) {
  int is_v6;
  if (buf == NULL || len <= 0) return;
  memset(buf, 0, len);
#if MG_ENABLE_IPV6
  is_v6 = sa->sa.sa_family == AF_INET6;
#else
  is_v6 = 0;
#endif
  if (flags & MG_SOCK_STRINGIFY_IP) {
#if MG_ENABLE_IPV6
    const void *addr = NULL;
    char *start = buf;
    socklen_t capacity = len;
    if (!is_v6) {
      addr = &sa->sin.sin_addr;
    } else {
      addr = (void *) &sa->sin6.sin6_addr;
      if (flags & MG_SOCK_STRINGIFY_PORT) {
        *buf = '[';
        start++;
        capacity--;
      }
    }
    if (inet_ntop(sa->sa.sa_family, addr, start, capacity) == NULL) {
      goto cleanup;
    }
#elif defined(_WIN32) || MG_LWIP || (MG_NET_IF == MG_NET_IF_PIC32)
    /* Only Windoze Vista (and newer) have inet_ntop() */
    char *addr_str = inet_ntoa(sa->sin.sin_addr);
    if (addr_str != NULL) {
      strncpy(buf, inet_ntoa(sa->sin.sin_addr), len - 1);
    } else {
      goto cleanup;
    }
#else
    if (inet_ntop(AF_INET, (void *) &sa->sin.sin_addr, buf, len - 1) == NULL) {
      goto cleanup;
    }
#endif
  }
  if (flags & MG_SOCK_STRINGIFY_PORT) {
    int port = ntohs(sa->sin.sin_port);
    if (flags & MG_SOCK_STRINGIFY_IP) {
      int buf_len = strlen(buf);
      /* Closes the '[' opened above for v6, then appends ":port". */
      snprintf(buf + buf_len, len - (buf_len + 1), "%s:%d", (is_v6 ? "]" : ""),
               port);
    } else {
      snprintf(buf, len, "%d", port);
    }
  }
  return;
cleanup:
  *buf = '\0';
}

/* Renders a connection's local or remote (MG_SOCK_STRINGIFY_REMOTE) address. */
void mg_conn_addr_to_str(struct mg_connection *nc, char *buf, size_t len,
                         int flags) {
  union socket_address sa;
  memset(&sa, 0, sizeof(sa));
  mg_if_get_conn_addr(nc, flags & MG_SOCK_STRINGIFY_REMOTE, &sa);
  mg_sock_addr_to_str(&sa, buf, len, flags);
}

#if MG_ENABLE_HEXDUMP
/* Formats len bytes of buf as a classic hexdump (16 bytes per row, with an
 * ASCII gutter) into dst; offset labels the first byte. Returns the number
 * of characters written. */
static int mg_hexdump_n(const void *buf, int len, char *dst, int dst_len,
                        int offset) {
  const unsigned char *p = (const unsigned char *) buf;
  char ascii[17] = "";
  int i, idx, n = 0;
  for (i = 0; i < len; i++) {
    idx = i % 16;
    if (idx == 0) {
      /* New row: flush the previous row's ASCII gutter, print the offset. */
      if (i > 0) n += snprintf(dst + n, MAX(dst_len - n, 0), " %s\n", ascii);
      n += snprintf(dst + n, MAX(dst_len - n, 0), "%04x ", i + offset);
    }
    if (dst_len - n < 0) {
      return n;
    }
    n += snprintf(dst + n, MAX(dst_len - n, 0), " %02x", p[i]);
    ascii[idx] = p[i] < 0x20 || p[i] > 0x7e ? '.' : p[i];
    ascii[idx + 1] = '\0';
  }
  /* Pad the last (short) row so the ASCII gutter lines up. */
  while (i++ % 16) n += snprintf(dst + n, MAX(dst_len - n, 0), "%s", " ");
  n += snprintf(dst + n, MAX(dst_len - n, 0), " %s\n", ascii);
  return n;
}

/* Hexdumps buf into dst; returns characters written. */
int mg_hexdump(const void *buf, int len, char *dst, int dst_len) {
  return mg_hexdump_n(buf, len, dst, dst_len, 0);
}

/* Hexdumps buf to the given FILE, 16 bytes per line. */
void mg_hexdumpf(FILE *fp, const void *buf, int len) {
  char tmp[80];
  int offset = 0, n;
  while (len > 0) {
    n = (len < 16 ? len : 16);
    mg_hexdump_n(((const char *) buf) + offset, n, tmp, sizeof(tmp), offset);
    fputs(tmp, fp);
    offset += n;
    len -= n;
  }
}

/* Logs a traffic hexdump for nc to `path` ("-" = stdout, "--" = stderr,
 * anything else = file opened in append mode). */
void mg_hexdump_connection(struct mg_connection *nc, const char *path,
                           const void *buf, int num_bytes, int ev) {
  FILE *fp = NULL;
  char *hexbuf, src[60], dst[60];
  int buf_size = num_bytes * 5 + 100;
  if (strcmp(path, "-") == 0) {
    fp = stdout;
  } else if (strcmp(path, "--") == 0) {
    fp = stderr;
#if MG_ENABLE_FILESYSTEM
  } else {
    fp = mg_fopen(path, "a");
#endif
  }
  if (fp == NULL) return;
  mg_conn_addr_to_str(nc, src, sizeof(src),
                      MG_SOCK_STRINGIFY_IP | MG_SOCK_STRINGIFY_PORT);
  mg_conn_addr_to_str(nc, dst, sizeof(dst), MG_SOCK_STRINGIFY_IP |
                                                MG_SOCK_STRINGIFY_PORT |
                                                MG_SOCK_STRINGIFY_REMOTE);
  fprintf(fp, "%lu %p %s %s %s %d\n", (unsigned long) mg_time(), (void *) nc,
          src,
          ev == MG_EV_RECV ? "<-" : ev == MG_EV_SEND
                                        ? "->"
                                        : ev == MG_EV_ACCEPT
                                              ? "<A"
                                              : ev == MG_EV_CONNECT ? "C>"
                                                                    : "XX",
          dst, num_bytes);
  if (num_bytes > 0 && (hexbuf = (char *) MG_MALLOC(buf_size)) != NULL) {
    mg_hexdump(buf, num_bytes, hexbuf, buf_size);
    fprintf(fp, "%s", hexbuf);
    MG_FREE(hexbuf);
  }
  /* NOTE(review): guard checks stdin/stdout but not stderr, so the "--"
   * path ends up fclose()ing stderr — looks unintended; verify against
   * upstream, which excludes stdout and stderr here. */
  if (fp != stdin && fp != stdout) fclose(fp);
}
#endif

/* Returns 1 on big-endian hosts, 0 on little-endian. */
int mg_is_big_endian(void) {
  static const int n = 1;
  /* TODO(mkm) use compiletime check with 4-byte char literal */
  return ((char *) &n)[0] == 0;
}

/*
 * Advances through a comma-separated list. Stores the next entry in *val
 * and, when eq_val != NULL, splits "x=y" entries so val = "x" and
 * eq_val = "y". Returns a cursor for the next call, or NULL at the end.
 */
const char *mg_next_comma_list_entry(const char *list, struct mg_str *val,
                                     struct mg_str *eq_val) {
  if (list == NULL || *list == '\0') {
    /* End of the list */
    list = NULL;
  } else {
    val->p = list;
    if ((list = strchr(val->p, ',')) != NULL) {
      /* Comma found. Store length and shift the list ptr */
      val->len = list - val->p;
      list++;
    } else {
      /* This value is the last one */
      list = val->p + strlen(val->p);
      val->len = list - val->p;
    }
    if (eq_val != NULL) {
      /* Value has form "x=y", adjust pointers and lengths */
      /* so that val points to "x", and eq_val points to "y". */
      eq_val->len = 0;
      eq_val->p = (const char *) memchr(val->p, '=', val->len);
      if (eq_val->p != NULL) {
        eq_val->p++; /* Skip over '=' character */
        eq_val->len = val->p + val->len - eq_val->p;
        val->len = (eq_val->p - val->p) - 1;
      }
    }
  }
  return list;
}

/*
 * Glob-style prefix match supporting '?', '*' (stops at '/'), '**',
 * '$' (end anchor) and '|'-separated alternatives. Case-insensitive.
 * Returns the number of characters of str matched, or -1 on mismatch.
 */
int mg_match_prefix_n(const struct mg_str pattern, const struct mg_str str) {
  const char *or_str;
  size_t len, i = 0, j = 0;
  int res;
  if ((or_str = (const char *) memchr(pattern.p, '|', pattern.len)) != NULL) {
    /* Try the left alternative first, then recurse on the rest. */
    struct mg_str pstr = {pattern.p, (size_t)(or_str - pattern.p)};
    res = mg_match_prefix_n(pstr, str);
    if (res > 0) return res;
    pstr.p = or_str + 1;
    pstr.len = (pattern.p + pattern.len) - (or_str + 1);
    return mg_match_prefix_n(pstr, str);
  }
  for (; i < pattern.len; i++, j++) {
    if (pattern.p[i] == '?' && j != str.len) {
      continue;
    } else if (pattern.p[i] == '$') {
      return j == str.len ? (int) j : -1;
    } else if (pattern.p[i] == '*') {
      i++;
      if (pattern.p[i] == '*') {
        /* '**' also crosses '/' boundaries. */
        i++;
        len = str.len - j;
      } else {
        len = 0;
        while (j + len != str.len && str.p[j + len] != '/') {
          len++;
        }
      }
      if (i == pattern.len) {
        return j + len;
      }
      /* Backtrack: shrink the wildcard until the tail matches. */
      do {
        const struct mg_str pstr = {pattern.p + i, pattern.len - i};
        const struct mg_str sstr = {str.p + j + len, str.len - j - len};
        res = mg_match_prefix_n(pstr, sstr);
      } while (res == -1 && len-- > 0);
      return res == -1 ? -1 : (int) (j + res + len);
    } else if (lowercase(&pattern.p[i]) != lowercase(&str.p[j])) {
      return -1;
    }
  }
  return j;
}

/* NUL-terminated-string convenience wrapper for mg_match_prefix_n(). */
int mg_match_prefix(const char *pattern, int pattern_len, const char *str) {
  const struct mg_str pstr = {pattern, (size_t) pattern_len};
  return mg_match_prefix_n(pstr, mg_mk_str(str));
}

/* Portable errno accessor (WinCE has no errno). */
DO_NOT_WARN_UNUSED MG_INTERNAL int mg_get_errno(void) {
#ifndef WINCE
  return errno;
#else
  /* TODO(alashkin): translate error codes?
*/ return GetLastError(); #endif } void mg_mbuf_append_base64_putc(char ch, void *user_data) { struct mbuf *mbuf = (struct mbuf *) user_data; mbuf_append(mbuf, &ch, sizeof(ch)); } void mg_mbuf_append_base64(struct mbuf *mbuf, const void *data, size_t len) { struct cs_base64_ctx ctx; cs_base64_init(&ctx, mg_mbuf_append_base64_putc, mbuf); cs_base64_update(&ctx, (const char *) data, len); cs_base64_finish(&ctx); } void mg_basic_auth_header(const char *user, const char *pass, struct mbuf *buf) { const char *header_prefix = "Authorization: Basic "; const char *header_suffix = "\r\n"; struct cs_base64_ctx ctx; cs_base64_init(&ctx, mg_mbuf_append_base64_putc, buf); mbuf_append(buf, header_prefix, strlen(header_prefix)); cs_base64_update(&ctx, user, strlen(user)); if (pass != NULL) { cs_base64_update(&ctx, ":", 1); cs_base64_update(&ctx, pass, strlen(pass)); } cs_base64_finish(&ctx); mbuf_append(buf, header_suffix, strlen(header_suffix)); } #ifdef MG_MODULE_LINES #line 1 "mongoose/src/mqtt.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_MQTT #include <string.h> /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/mqtt.h" */ static uint16_t getu16(const char *p) { const uint8_t *up = (const uint8_t *) p; return (up[0] << 8) + up[1]; } static const char *scanto(const char *p, struct mg_str *s) { s->len = getu16(p); s->p = p + 2; return s->p + s->len; } MG_INTERNAL int parse_mqtt(struct mbuf *io, struct mg_mqtt_message *mm) { uint8_t header; size_t len = 0; int cmd; const char *p = &io->buf[1], *end; if (io->len < 2) return -1; header = io->buf[0]; cmd = header >> 4; /* decode mqtt variable length */ do { len += (*p & 127) << 7 * (p - &io->buf[1]); } while ((*p++ & 128) != 0 && ((size_t)(p - io->buf) <= io->len)); end = p + len; if (end > io->buf + io->len + 1) { return -1; } mm->cmd = cmd; mm->qos = MG_MQTT_GET_QOS(header); switch (cmd) { case MG_MQTT_CMD_CONNECT: { p = scanto(p, 
&mm->protocol_name); mm->protocol_version = *(uint8_t *) p++; mm->connect_flags = *(uint8_t *) p++; mm->keep_alive_timer = getu16(p); p += 2; if (p < end) p = scanto(p, &mm->client_id); if (p < end && (mm->connect_flags & MG_MQTT_HAS_WILL)) p = scanto(p, &mm->will_topic); if (p < end && (mm->connect_flags & MG_MQTT_HAS_WILL)) p = scanto(p, &mm->will_message); if (p < end && (mm->connect_flags & MG_MQTT_HAS_USER_NAME)) p = scanto(p, &mm->user_name); if (p < end && (mm->connect_flags & MG_MQTT_HAS_PASSWORD)) p = scanto(p, &mm->password); LOG(LL_DEBUG, ("%d %2x %d proto [%.*s] client_id [%.*s] will_topic [%.*s] " "will_msg [%.*s] user_name [%.*s] password [%.*s]", len, (int) mm->connect_flags, (int) mm->keep_alive_timer, (int) mm->protocol_name.len, mm->protocol_name.p, (int) mm->client_id.len, mm->client_id.p, (int) mm->will_topic.len, mm->will_topic.p, (int) mm->will_message.len, mm->will_message.p, (int) mm->user_name.len, mm->user_name.p, (int) mm->password.len, mm->password.p)); break; } case MG_MQTT_CMD_CONNACK: mm->connack_ret_code = p[1]; break; case MG_MQTT_CMD_PUBACK: case MG_MQTT_CMD_PUBREC: case MG_MQTT_CMD_PUBREL: case MG_MQTT_CMD_PUBCOMP: case MG_MQTT_CMD_SUBACK: mm->message_id = getu16(p); break; case MG_MQTT_CMD_PUBLISH: { p = scanto(p, &mm->topic); if (mm->qos > 0) { mm->message_id = getu16(p); p += 2; } mm->payload.p = p; mm->payload.len = end - p; break; } case MG_MQTT_CMD_SUBSCRIBE: mm->message_id = getu16(p); p += 2; /* * topic expressions are left in the payload and can be parsed with * `mg_mqtt_next_subscribe_topic` */ mm->payload.p = p; mm->payload.len = end - p; break; default: /* Unhandled command */ break; } return end - io->buf; } static void mqtt_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { int len; struct mbuf *io = &nc->recv_mbuf; struct mg_mqtt_message mm; memset(&mm, 0, sizeof(mm)); nc->handler(nc, ev, ev_data MG_UD_ARG(user_data)); switch (ev) { case MG_EV_RECV: len = parse_mqtt(io, &mm); if 
(len == -1) break; /* not fully buffered */ nc->handler(nc, MG_MQTT_EVENT_BASE + mm.cmd, &mm MG_UD_ARG(user_data)); mbuf_remove(io, len); break; } } static void mg_mqtt_proto_data_destructor(void *proto_data) { MG_FREE(proto_data); } int mg_mqtt_match_topic_expression(struct mg_str exp, struct mg_str topic) { /* TODO(mkm): implement real matching */ if (memchr(exp.p, '#', exp.len)) { /* exp `foo/#` will become `foo/` */ exp.len -= 1; /* * topic should be longer than the expression: e.g. topic `foo/bar` does * match `foo/#`, but neither `foo` nor `foo/` do. */ if (topic.len <= exp.len) { return 0; } /* Truncate topic so that it'll pass the next length check */ topic.len = exp.len; } if (topic.len != exp.len) { return 0; } return strncmp(topic.p, exp.p, exp.len) == 0; } int mg_mqtt_vmatch_topic_expression(const char *exp, struct mg_str topic) { return mg_mqtt_match_topic_expression(mg_mk_str(exp), topic); } void mg_set_protocol_mqtt(struct mg_connection *nc) { nc->proto_handler = mqtt_handler; nc->proto_data = MG_CALLOC(1, sizeof(struct mg_mqtt_proto_data)); nc->proto_data_destructor = mg_mqtt_proto_data_destructor; } void mg_send_mqtt_handshake(struct mg_connection *nc, const char *client_id) { static struct mg_send_mqtt_handshake_opts opts; mg_send_mqtt_handshake_opt(nc, client_id, opts); } void mg_send_mqtt_handshake_opt(struct mg_connection *nc, const char *client_id, struct mg_send_mqtt_handshake_opts opts) { uint8_t header = MG_MQTT_CMD_CONNECT << 4; uint8_t rem_len; uint16_t keep_alive; uint16_t len; struct mg_mqtt_proto_data *pd = (struct mg_mqtt_proto_data *) nc->proto_data; /* * 9: version_header(len, magic_string, version_number), 1: flags, 2: * keep-alive timer, * 2: client_identifier_len, n: client_id */ rem_len = 9 + 1 + 2 + 2 + (uint8_t) strlen(client_id); if (opts.user_name != NULL) { opts.flags |= MG_MQTT_HAS_USER_NAME; rem_len += (uint8_t) strlen(opts.user_name) + 2; } if (opts.password != NULL) { opts.flags |= MG_MQTT_HAS_PASSWORD; rem_len += 
(uint8_t) strlen(opts.password) + 2;
  }
  if (opts.will_topic != NULL && opts.will_message != NULL) {
    opts.flags |= MG_MQTT_HAS_WILL;
    rem_len += (uint8_t) strlen(opts.will_topic) + 2;
    rem_len += (uint8_t) strlen(opts.will_message) + 2;
  }

  /* NOTE(review): rem_len is a single uint8_t, so long client ids / topics
   * silently wrap the counter and the one-byte varint encoding below —
   * TODO confirm inputs are guaranteed short. */
  mg_send(nc, &header, 1);
  mg_send(nc, &rem_len, 1);
  mg_send(nc, "\00\06MQIsdp\03", 9); /* MQTT 3.1 protocol name + version */
  mg_send(nc, &opts.flags, 1);

  if (opts.keep_alive == 0) {
    opts.keep_alive = 60; /* default keep-alive, seconds */
  }
  keep_alive = htons(opts.keep_alive);
  mg_send(nc, &keep_alive, 2);

  len = htons((uint16_t) strlen(client_id));
  mg_send(nc, &len, 2);
  mg_send(nc, client_id, strlen(client_id));

  if (opts.flags & MG_MQTT_HAS_WILL) {
    len = htons((uint16_t) strlen(opts.will_topic));
    mg_send(nc, &len, 2);
    mg_send(nc, opts.will_topic, strlen(opts.will_topic));

    len = htons((uint16_t) strlen(opts.will_message));
    mg_send(nc, &len, 2);
    mg_send(nc, opts.will_message, strlen(opts.will_message));
  }

  if (opts.flags & MG_MQTT_HAS_USER_NAME) {
    len = htons((uint16_t) strlen(opts.user_name));
    mg_send(nc, &len, 2);
    mg_send(nc, opts.user_name, strlen(opts.user_name));
  }
  if (opts.flags & MG_MQTT_HAS_PASSWORD) {
    len = htons((uint16_t) strlen(opts.password));
    mg_send(nc, &len, 2);
    mg_send(nc, opts.password, strlen(opts.password));
  }
  if (pd != NULL) {
    pd->keep_alive = opts.keep_alive;
  }
}

/* Prepends the MQTT fixed header (cmd, flags, varint remaining-length) in
 * front of the last `len` bytes already written to nc's send buffer. */
static void mg_mqtt_prepend_header(struct mg_connection *nc, uint8_t cmd,
                                   uint8_t flags, size_t len) {
  size_t off = nc->send_mbuf.len - len;
  uint8_t header = cmd << 4 | (uint8_t) flags;

  uint8_t buf[1 + sizeof(size_t)];
  uint8_t *vlen = &buf[1];

  assert(nc->send_mbuf.len >= len);

  buf[0] = header;

  /* mqtt variable length encoding */
  do {
    *vlen = len % 0x80;
    len /= 0x80;
    if (len > 0) *vlen |= 0x80;
    vlen++;
  } while (len > 0);

  mbuf_insert(&nc->send_mbuf, off, buf, vlen - buf);
}

/* Sends a PUBLISH; message_id is included only when QoS > 0. */
void mg_mqtt_publish(struct mg_connection *nc, const char *topic,
                     uint16_t message_id, int flags, const void *data,
                     size_t len) {
  size_t old_len = nc->send_mbuf.len;

  uint16_t topic_len = htons((uint16_t) strlen(topic));
  uint16_t message_id_net = htons(message_id);

  mg_send(nc, &topic_len, 2);
  mg_send(nc, topic, strlen(topic));
  if (MG_MQTT_GET_QOS(flags) > 0) {
    mg_send(nc, &message_id_net, 2);
  }
  mg_send(nc, data, len);

  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PUBLISH, flags,
                         nc->send_mbuf.len - old_len);
}

/* Sends a SUBSCRIBE for the given topic expressions. */
void mg_mqtt_subscribe(struct mg_connection *nc,
                       const struct mg_mqtt_topic_expression *topics,
                       size_t topics_len, uint16_t message_id) {
  size_t old_len = nc->send_mbuf.len;

  uint16_t message_id_n = htons(message_id);
  size_t i;

  mg_send(nc, (char *) &message_id_n, 2);
  for (i = 0; i < topics_len; i++) {
    uint16_t topic_len_n = htons((uint16_t) strlen(topics[i].topic));
    mg_send(nc, &topic_len_n, 2);
    mg_send(nc, topics[i].topic, strlen(topics[i].topic));
    mg_send(nc, &topics[i].qos, 1);
  }

  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_SUBSCRIBE, MG_MQTT_QOS(1),
                         nc->send_mbuf.len - old_len);
}

/*
 * Iterates the topic list inside a SUBSCRIBE payload. Returns the position
 * for the next call, or -1 when the list is exhausted.
 * NOTE(review): topic->len comes straight off the wire and is not checked
 * against payload.len, so buf[2 + topic->len] can read out of bounds on a
 * malformed packet — verify against a hardened version.
 */
int mg_mqtt_next_subscribe_topic(struct mg_mqtt_message *msg,
                                 struct mg_str *topic, uint8_t *qos, int pos) {
  unsigned char *buf = (unsigned char *) msg->payload.p + pos;
  if ((size_t) pos >= msg->payload.len) {
    return -1;
  }
  topic->len = buf[0] << 8 | buf[1];
  topic->p = (char *) buf + 2;
  *qos = buf[2 + topic->len];
  return pos + 2 + topic->len + 1;
}

/* Sends an UNSUBSCRIBE for the given topics. */
void mg_mqtt_unsubscribe(struct mg_connection *nc, char **topics,
                         size_t topics_len, uint16_t message_id) {
  size_t old_len = nc->send_mbuf.len;

  uint16_t message_id_n = htons(message_id);
  size_t i;

  mg_send(nc, (char *) &message_id_n, 2);
  for (i = 0; i < topics_len; i++) {
    uint16_t topic_len_n = htons((uint16_t) strlen(topics[i]));
    mg_send(nc, &topic_len_n, 2);
    mg_send(nc, topics[i], strlen(topics[i]));
  }

  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_UNSUBSCRIBE, MG_MQTT_QOS(1),
                         nc->send_mbuf.len - old_len);
}

/* Sends a CONNACK with the given return code. */
void mg_mqtt_connack(struct mg_connection *nc, uint8_t return_code) {
  uint8_t unused = 0;
  mg_send(nc, &unused, 1);
  mg_send(nc, &return_code, 1);
  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_CONNACK, 0, 2);
}

/*
 * Sends a command which contains only a `message_id` and a QoS level of 1.
 *
 * Helper function.
 */
static void mg_send_mqtt_short_command(struct mg_connection *nc, uint8_t cmd,
                                       uint16_t message_id) {
  uint16_t message_id_net = htons(message_id);
  /* PUBREL is the one short command that must carry QoS 1 flags. */
  uint8_t flags = (cmd == MG_MQTT_CMD_PUBREL ? 2 : 0);
  mg_send(nc, &message_id_net, 2);
  mg_mqtt_prepend_header(nc, cmd, flags, 2 /* len */);
}

/* PUBACK: QoS 1 publish acknowledgement. */
void mg_mqtt_puback(struct mg_connection *nc, uint16_t message_id) {
  mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBACK, message_id);
}

/* PUBREC: QoS 2 publish received (step 1 of 3). */
void mg_mqtt_pubrec(struct mg_connection *nc, uint16_t message_id) {
  mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBREC, message_id);
}

/* PUBREL: QoS 2 publish release (step 2 of 3). */
void mg_mqtt_pubrel(struct mg_connection *nc, uint16_t message_id) {
  mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBREL, message_id);
}

/* PUBCOMP: QoS 2 publish complete (step 3 of 3). */
void mg_mqtt_pubcomp(struct mg_connection *nc, uint16_t message_id) {
  mg_send_mqtt_short_command(nc, MG_MQTT_CMD_PUBCOMP, message_id);
}

/* SUBACK carrying one granted-QoS byte per subscription. */
void mg_mqtt_suback(struct mg_connection *nc, uint8_t *qoss, size_t qoss_len,
                    uint16_t message_id) {
  size_t i;
  uint16_t message_id_net = htons(message_id);
  mg_send(nc, &message_id_net, 2);
  for (i = 0; i < qoss_len; i++) {
    mg_send(nc, &qoss[i], 1);
  }
  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_SUBACK, MG_MQTT_QOS(1), 2 + qoss_len);
}

/* UNSUBACK acknowledgement. */
void mg_mqtt_unsuback(struct mg_connection *nc, uint16_t message_id) {
  mg_send_mqtt_short_command(nc, MG_MQTT_CMD_UNSUBACK, message_id);
}

/* PINGREQ keep-alive probe. */
void mg_mqtt_ping(struct mg_connection *nc) {
  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PINGREQ, 0, 0);
}

/* PINGRESP keep-alive reply. */
void mg_mqtt_pong(struct mg_connection *nc) {
  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_PINGRESP, 0, 0);
}

/* DISCONNECT notification. */
void mg_mqtt_disconnect(struct mg_connection *nc) {
  mg_mqtt_prepend_header(nc, MG_MQTT_CMD_DISCONNECT, 0, 0);
}

#endif /* MG_ENABLE_MQTT */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/mqtt_server.c"
#endif
/*
 * Copyright (c) 2014 Cesanta Software Limited
 * All rights reserved
 */

/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/mqtt-server.h" */

#if MG_ENABLE_MQTT_BROKER

/* Initializes a broker session record for connection nc. */
static void mg_mqtt_session_init(struct
mg_mqtt_broker *brk, struct mg_mqtt_session *s, struct mg_connection *nc) { s->brk = brk; s->subscriptions = NULL; s->num_subscriptions = 0; s->nc = nc; } static void mg_mqtt_add_session(struct mg_mqtt_session *s) { LIST_INSERT_HEAD(&s->brk->sessions, s, link); } static void mg_mqtt_remove_session(struct mg_mqtt_session *s) { LIST_REMOVE(s, link); } static void mg_mqtt_destroy_session(struct mg_mqtt_session *s) { size_t i; for (i = 0; i < s->num_subscriptions; i++) { MG_FREE((void *) s->subscriptions[i].topic); } MG_FREE(s->subscriptions); MG_FREE(s); } static void mg_mqtt_close_session(struct mg_mqtt_session *s) { mg_mqtt_remove_session(s); mg_mqtt_destroy_session(s); } void mg_mqtt_broker_init(struct mg_mqtt_broker *brk, void *user_data) { LIST_INIT(&brk->sessions); brk->user_data = user_data; } static void mg_mqtt_broker_handle_connect(struct mg_mqtt_broker *brk, struct mg_connection *nc) { struct mg_mqtt_session *s = (struct mg_mqtt_session *) MG_CALLOC(1, sizeof *s); if (s == NULL) { /* LCOV_EXCL_START */ mg_mqtt_connack(nc, MG_EV_MQTT_CONNACK_SERVER_UNAVAILABLE); return; /* LCOV_EXCL_STOP */ } /* TODO(mkm): check header (magic and version) */ mg_mqtt_session_init(brk, s, nc); s->user_data = nc->user_data; nc->user_data = s; mg_mqtt_add_session(s); mg_mqtt_connack(nc, MG_EV_MQTT_CONNACK_ACCEPTED); } static void mg_mqtt_broker_handle_subscribe(struct mg_connection *nc, struct mg_mqtt_message *msg) { struct mg_mqtt_session *ss = (struct mg_mqtt_session *) nc->user_data; uint8_t qoss[512]; size_t qoss_len = 0; struct mg_str topic; uint8_t qos; int pos; struct mg_mqtt_topic_expression *te; for (pos = 0; (pos = mg_mqtt_next_subscribe_topic(msg, &topic, &qos, pos)) != -1;) { qoss[qoss_len++] = qos; } ss->subscriptions = (struct mg_mqtt_topic_expression *) MG_REALLOC( ss->subscriptions, sizeof(*ss->subscriptions) * qoss_len); for (pos = 0; (pos = mg_mqtt_next_subscribe_topic(msg, &topic, &qos, pos)) != -1; ss->num_subscriptions++) { te = 
&ss->subscriptions[ss->num_subscriptions]; te->topic = (char *) MG_MALLOC(topic.len + 1); te->qos = qos; strncpy((char *) te->topic, topic.p, topic.len + 1); } mg_mqtt_suback(nc, qoss, qoss_len, msg->message_id); } static void mg_mqtt_broker_handle_publish(struct mg_mqtt_broker *brk, struct mg_mqtt_message *msg) { struct mg_mqtt_session *s; size_t i; for (s = mg_mqtt_next(brk, NULL); s != NULL; s = mg_mqtt_next(brk, s)) { for (i = 0; i < s->num_subscriptions; i++) { if (mg_mqtt_vmatch_topic_expression(s->subscriptions[i].topic, msg->topic)) { char buf[100], *p = buf; mg_asprintf(&p, sizeof(buf), "%.*s", (int) msg->topic.len, msg->topic.p); if (p == NULL) { return; } mg_mqtt_publish(s->nc, p, 0, 0, msg->payload.p, msg->payload.len); if (p != buf) { MG_FREE(p); } break; } } } } void mg_mqtt_broker(struct mg_connection *nc, int ev, void *data) { struct mg_mqtt_message *msg = (struct mg_mqtt_message *) data; struct mg_mqtt_broker *brk; if (nc->listener) { brk = (struct mg_mqtt_broker *) nc->listener->user_data; } else { brk = (struct mg_mqtt_broker *) nc->user_data; } switch (ev) { case MG_EV_ACCEPT: mg_set_protocol_mqtt(nc); nc->user_data = NULL; /* Clear up the inherited pointer to broker */ break; case MG_EV_MQTT_CONNECT: mg_mqtt_broker_handle_connect(brk, nc); break; case MG_EV_MQTT_SUBSCRIBE: mg_mqtt_broker_handle_subscribe(nc, msg); break; case MG_EV_MQTT_PUBLISH: mg_mqtt_broker_handle_publish(brk, msg); break; case MG_EV_CLOSE: if (nc->listener && nc->user_data != NULL) { mg_mqtt_close_session((struct mg_mqtt_session *) nc->user_data); } break; } } struct mg_mqtt_session *mg_mqtt_next(struct mg_mqtt_broker *brk, struct mg_mqtt_session *s) { return s == NULL ? 
LIST_FIRST(&brk->sessions) : LIST_NEXT(s, link);
}

#endif /* MG_ENABLE_MQTT_BROKER */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/dns.c"
#endif
/*
 * Copyright (c) 2014 Cesanta Software Limited
 * All rights reserved
 */

#if MG_ENABLE_DNS

/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/dns.h" */

/* Rolling transaction id for outgoing DNS queries. */
static int mg_dns_tid = 0xa0;

/* Wire-format DNS packet header (fields big-endian on the wire). */
struct mg_dns_header {
  uint16_t transaction_id;
  uint16_t flags;
  uint16_t num_questions;
  uint16_t num_answers;
  uint16_t num_authority_prs;
  uint16_t num_other_prs;
};

/* Returns the next answer of type `query` after `prev` (NULL = first). */
struct mg_dns_resource_record *mg_dns_next_record(
    struct mg_dns_message *msg, int query,
    struct mg_dns_resource_record *prev) {
  struct mg_dns_resource_record *rr;
  for (rr = (prev == NULL ? msg->answers : prev + 1);
       rr - msg->answers < msg->num_answers; rr++) {
    if (rr->rtype == query) {
      return rr;
    }
  }
  return NULL;
}

/* Copies rr's data (A/AAAA address, or uncompressed CNAME) into `data`.
 * Returns 0 on success, -1 on unsupported type or short buffer. */
int mg_dns_parse_record_data(struct mg_dns_message *msg,
                             struct mg_dns_resource_record *rr, void *data,
                             size_t data_len) {
  switch (rr->rtype) {
    case MG_DNS_A_RECORD:
      if (data_len < sizeof(struct in_addr)) {
        return -1;
      }
      if (rr->rdata.p + data_len > msg->pkt.p + msg->pkt.len) {
        return -1;
      }
      memcpy(data, rr->rdata.p, data_len);
      return 0;
#if MG_ENABLE_IPV6
    case MG_DNS_AAAA_RECORD:
      if (data_len < sizeof(struct in6_addr)) {
        return -1; /* LCOV_EXCL_LINE */
      }
      /* NOTE(review): unlike the A branch, no packet-bounds check here —
       * TODO confirm rdata always lies inside pkt. */
      memcpy(data, rr->rdata.p, data_len);
      return 0;
#endif
    case MG_DNS_CNAME_RECORD:
      mg_dns_uncompress_name(msg, &rr->rdata, (char *) data, data_len);
      return 0;
  }

  return -1;
}

/* Writes msg's header (converted to network order) into io at pos. */
int mg_dns_insert_header(struct mbuf *io, size_t pos,
                         struct mg_dns_message *msg) {
  struct mg_dns_header header;

  memset(&header, 0, sizeof(header));
  header.transaction_id = msg->transaction_id;
  header.flags = htons(msg->flags);
  header.num_questions = htons(msg->num_questions);
  header.num_answers = htons(msg->num_answers);

  return mbuf_insert(io, pos, &header, sizeof(header));
}

/* Copies the raw question section of msg into io (for building replies). */
int mg_dns_copy_questions(struct mbuf *io, struct mg_dns_message *msg) {
  unsigned char *begin, *end;
  struct mg_dns_resource_record *last_q;
  if (msg->num_questions <= 0) return 0;
  begin = (unsigned char *) msg->pkt.p + sizeof(struct mg_dns_header);
  last_q = &msg->questions[msg->num_questions - 1];
  /* +4 skips the final question's type and class fields. */
  end = (unsigned char *) last_q->name.p + last_q->name.len + 4;
  return mbuf_append(io, begin, end - begin);
}

/* Encodes a dotted host name into DNS label format; returns bytes written
 * or -1 when a single label exceeds 127 characters. */
int mg_dns_encode_name(struct mbuf *io, const char *name, size_t len) {
  const char *s;
  unsigned char n;
  size_t pos = io->len;

  do {
    if ((s = strchr(name, '.')) == NULL) {
      s = name + len;
    }

    if (s - name > 127) {
      return -1; /* TODO(mkm) cover */
    }
    n = s - name;           /* chunk length */
    mbuf_append(io, &n, 1); /* send length */
    mbuf_append(io, name, n);

    if (*s == '.') {
      n++;
    }

    name += n;
    len -= n;
  } while (*s != '\0');
  mbuf_append(io, "\0", 1); /* Mark end of host name */

  return io->len - pos;
}

/* Appends one question/answer record to io; returns bytes written or -1. */
int mg_dns_encode_record(struct mbuf *io, struct mg_dns_resource_record *rr,
                         const char *name, size_t nlen, const void *rdata,
                         size_t rlen) {
  size_t pos = io->len;
  uint16_t u16;
  uint32_t u32;

  if (rr->kind == MG_DNS_INVALID_RECORD) {
    return -1; /* LCOV_EXCL_LINE */
  }

  if (mg_dns_encode_name(io, name, nlen) == -1) {
    return -1;
  }

  u16 = htons(rr->rtype);
  mbuf_append(io, &u16, 2);
  u16 = htons(rr->rclass);
  mbuf_append(io, &u16, 2);

  if (rr->kind == MG_DNS_ANSWER) {
    u32 = htonl(rr->ttl);
    mbuf_append(io, &u32, 4);

    if (rr->rtype == MG_DNS_CNAME_RECORD) {
      int clen;
      /* fill size after encoding */
      size_t off = io->len;
      mbuf_append(io, &u16, 2);
      if ((clen = mg_dns_encode_name(io, (const char *) rdata, rlen)) == -1) {
        return -1;
      }
      u16 = clen;
      io->buf[off] = u16 >> 8;
      io->buf[off + 1] = u16 & 0xff;
    } else {
      u16 = htons((uint16_t) rlen);
      mbuf_append(io, &u16, 2);
      mbuf_append(io, rdata, rlen);
    }
  }

  return io->len - pos;
}

/* Sends a DNS query for `name` over nc (TCP queries get a length prefix). */
void mg_send_dns_query(struct mg_connection *nc, const char *name,
                       int query_type) {
  struct mg_dns_message *msg =
      (struct mg_dns_message *) MG_CALLOC(1, sizeof(*msg));
  struct mbuf pkt;
  /* NOTE(review): MG_CALLOC result is dereferenced without a NULL check. */
  struct mg_dns_resource_record *rr = &msg->questions[0];

  DBG(("%s %d", name, query_type));

  mbuf_init(&pkt, 64 /* Start small, it'll
grow as needed. */);

  msg->transaction_id = ++mg_dns_tid;
  msg->flags = 0x100; /* recursion desired */
  msg->num_questions = 1;

  mg_dns_insert_header(&pkt, 0, msg);

  rr->rtype = query_type;
  rr->rclass = 1; /* Class: inet */
  rr->kind = MG_DNS_QUESTION;

  if (mg_dns_encode_record(&pkt, rr, name, strlen(name), NULL, 0) == -1) {
    /* TODO(mkm): return an error code */
    goto cleanup; /* LCOV_EXCL_LINE */
  }

  /* TCP DNS requires messages to be prefixed with len */
  if (!(nc->flags & MG_F_UDP)) {
    uint16_t len = htons((uint16_t) pkt.len);
    mbuf_insert(&pkt, 0, &len, 2);
  }

  mg_send(nc, pkt.buf, pkt.len);
  mbuf_free(&pkt);

cleanup:
  MG_FREE(msg);
}

/* Parses one resource record (question or answer) starting at data.
 * Returns the position after the record, or NULL on truncated input. */
static unsigned char *mg_parse_dns_resource_record(
    unsigned char *data, unsigned char *end, struct mg_dns_resource_record *rr,
    int reply) {
  unsigned char *name = data;
  int chunk_len, data_len;

  /* Skip over the (possibly compressed) name. */
  while (data < end && (chunk_len = *data)) {
    if (((unsigned char *) data)[0] & 0xc0) {
      /* Compression pointer: two bytes, terminates the name. */
      data += 1;
      break;
    }
    data += chunk_len + 1;
  }

  if (data > end - 5) {
    return NULL;
  }

  rr->name.p = (char *) name;
  rr->name.len = data - name + 1;
  data++;

  rr->rtype = data[0] << 8 | data[1];
  data += 2;

  rr->rclass = data[0] << 8 | data[1];
  data += 2;

  rr->kind = reply ? MG_DNS_ANSWER : MG_DNS_QUESTION;
  if (reply) {
    if (data >= end - 6) {
      return NULL;
    }

    rr->ttl = (uint32_t) data[0] << 24 | (uint32_t) data[1] << 16 |
              data[2] << 8 | data[3];
    data += 4;

    data_len = *data << 8 | *(data + 1);
    data += 2;

    rr->rdata.p = (char *) data;
    rr->rdata.len = data_len;

    /* NOTE(review): data_len comes off the wire and is not clamped to end,
     * so the returned cursor (and rdata.len) can point past the packet —
     * consumers must bounds-check (mg_dns_parse_record_data's A branch
     * does; the AAAA branch does not). */
    data += data_len;
  }

  return data;
}

/* Parses a raw DNS packet into msg; question/answer counts are clamped to
 * the fixed-size arrays in msg. Returns 0 on success, -1 on error. */
int mg_parse_dns(const char *buf, int len, struct mg_dns_message *msg) {
  struct mg_dns_header *header = (struct mg_dns_header *) buf;
  unsigned char *data = (unsigned char *) buf + sizeof(*header);
  unsigned char *end = (unsigned char *) buf + len;
  int i;

  memset(msg, 0, sizeof(*msg));
  msg->pkt.p = buf;
  msg->pkt.len = len;

  if (len < (int) sizeof(*header)) return -1;

  msg->transaction_id = header->transaction_id;
  msg->flags = ntohs(header->flags);
  msg->num_questions = ntohs(header->num_questions);
  if (msg->num_questions > (int) ARRAY_SIZE(msg->questions)) {
    msg->num_questions = (int) ARRAY_SIZE(msg->questions);
  }
  msg->num_answers = ntohs(header->num_answers);
  if (msg->num_answers > (int) ARRAY_SIZE(msg->answers)) {
    msg->num_answers = (int) ARRAY_SIZE(msg->answers);
  }

  for (i = 0; i < msg->num_questions; i++) {
    data = mg_parse_dns_resource_record(data, end, &msg->questions[i], 0);
    if (data == NULL) return -1;
  }

  for (i = 0; i < msg->num_answers; i++) {
    data = mg_parse_dns_resource_record(data, end, &msg->answers[i], 1);
    if (data == NULL) return -1;
  }

  return 0;
}

/* Expands a (possibly compressed) DNS name into dst; returns its length.
 * NOTE(review): a packet whose compression pointers form a cycle keeps
 * this loop running forever — TODO confirm whether an upper bound is
 * enforced elsewhere. */
size_t mg_dns_uncompress_name(struct mg_dns_message *msg, struct mg_str *name,
                              char *dst, int dst_len) {
  int chunk_len;
  char *old_dst = dst;
  const unsigned char *data = (unsigned char *) name->p;
  const unsigned char *end = (unsigned char *) msg->pkt.p + msg->pkt.len;

  if (data >= end) {
    return 0;
  }

  while ((chunk_len = *data++)) {
    int leeway = dst_len - (dst - old_dst);
    if (data >= end) {
      return 0;
    }

    if (chunk_len & 0xc0) {
      /* Compression pointer: jump to the 14-bit offset within the packet. */
      uint16_t off = (data[-1] & (~0xc0)) << 8 | data[0];
      if (off >= msg->pkt.len) {
        return 0;
      }
      data = (unsigned char *) msg->pkt.p + off;
      continue;
    }
    if (chunk_len > leeway) {
      chunk_len = leeway;
    }

    if (data + chunk_len >= end) {
      return 0;
    }

    memcpy(dst, data, chunk_len);
    data += chunk_len;
    dst += chunk_len;
    leeway -= chunk_len;
    if (leeway == 0) {
      return dst - old_dst;
    }
    *dst++ = '.';
  }

  if (dst != old_dst) {
    *--dst = 0; /* replace the trailing '.' with the terminator */
  }

  return dst - old_dst;
}

/* DNS protocol handler: parses incoming packets, replies with a format
 * error on bad input, otherwise raises MG_DNS_MESSAGE to the user. */
static void dns_handler(struct mg_connection *nc, int ev,
                        void *ev_data MG_UD_ARG(void *user_data)) {
  struct mbuf *io = &nc->recv_mbuf;
  struct mg_dns_message msg;

  /* Pass low-level events to the user handler */
  nc->handler(nc, ev, ev_data MG_UD_ARG(user_data));

  switch (ev) {
    case MG_EV_RECV:
      if (!(nc->flags & MG_F_UDP)) {
        /* TCP DNS carries a 2-byte length prefix; strip it. */
        mbuf_remove(&nc->recv_mbuf, 2);
      }
      if (mg_parse_dns(nc->recv_mbuf.buf, nc->recv_mbuf.len, &msg) == -1) {
        /* reply + recursion allowed + format error */
        memset(&msg, 0, sizeof(msg));
        msg.flags = 0x8081;
        mg_dns_insert_header(io, 0, &msg);
        if (!(nc->flags & MG_F_UDP)) {
          uint16_t len = htons((uint16_t) io->len);
          mbuf_insert(io, 0, &len, 2);
        }
        mg_send(nc, io->buf, io->len);
      } else {
        /* Call user handler with parsed message */
        nc->handler(nc, MG_DNS_MESSAGE, &msg MG_UD_ARG(user_data));
      }
      mbuf_remove(io, io->len);
      break;
  }
}

/* Installs the DNS protocol handler on nc. */
void mg_set_protocol_dns(struct mg_connection *nc) {
  nc->proto_handler = dns_handler;
}

#endif /* MG_ENABLE_DNS */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/dns_server.c"
#endif
/*
 * Copyright (c) 2014 Cesanta Software Limited
 * All rights reserved
 */

#if MG_ENABLE_DNS_SERVER

/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/dns-server.h" */

/* Starts a reply that reuses msg's question section. */
struct mg_dns_reply mg_dns_create_reply(struct mbuf *io,
                                        struct mg_dns_message *msg) {
  struct mg_dns_reply rep;
  rep.msg = msg;
  rep.io = io;
  rep.start = io->len;

  /* reply + recursion allowed */
  msg->flags |= 0x8080;
  mg_dns_copy_questions(io, msg);

  msg->num_answers = 0;
  return rep;
}

/* Finalizes and transmits a DNS reply (TCP replies get a length prefix). */
void mg_dns_send_reply(struct mg_connection *nc, struct mg_dns_reply *r) {
  size_t sent = r->io->len - r->start;
  mg_dns_insert_header(r->io, r->start, r->msg);
  if (!(nc->flags & MG_F_UDP)) {
    uint16_t len =
htons((uint16_t) sent); mbuf_insert(r->io, r->start, &len, 2); } if (&nc->send_mbuf != r->io) { mg_send(nc, r->io->buf + r->start, r->io->len - r->start); r->io->len = r->start; } } int mg_dns_reply_record(struct mg_dns_reply *reply, struct mg_dns_resource_record *question, const char *name, int rtype, int ttl, const void *rdata, size_t rdata_len) { struct mg_dns_message *msg = (struct mg_dns_message *) reply->msg; char rname[512]; struct mg_dns_resource_record *ans = &msg->answers[msg->num_answers]; if (msg->num_answers >= MG_MAX_DNS_ANSWERS) { return -1; /* LCOV_EXCL_LINE */ } if (name == NULL) { name = rname; rname[511] = 0; mg_dns_uncompress_name(msg, &question->name, rname, sizeof(rname) - 1); } *ans = *question; ans->kind = MG_DNS_ANSWER; ans->rtype = rtype; ans->ttl = ttl; if (mg_dns_encode_record(reply->io, ans, name, strlen(name), rdata, rdata_len) == -1) { return -1; /* LCOV_EXCL_LINE */ }; msg->num_answers++; return 0; } #endif /* MG_ENABLE_DNS_SERVER */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/resolv.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_ASYNC_RESOLVER /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/resolv.h" */ #ifndef MG_DEFAULT_NAMESERVER #define MG_DEFAULT_NAMESERVER "8.8.8.8" #endif struct mg_resolve_async_request { char name[1024]; int query; mg_resolve_callback_t callback; void *data; time_t timeout; int max_retries; enum mg_resolve_err err; /* state */ time_t last_time; int retries; }; /* * Find what nameserver to use. 
* * Return 0 if OK, -1 if error */ static int mg_get_ip_address_of_nameserver(char *name, size_t name_len) { int ret = -1; #ifdef _WIN32 int i; LONG err; HKEY hKey, hSub; wchar_t subkey[512], value[128], *key = L"SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces"; if ((err = RegOpenKeyExW(HKEY_LOCAL_MACHINE, key, 0, KEY_READ, &hKey)) != ERROR_SUCCESS) { fprintf(stderr, "cannot open reg key %S: %ld\n", key, err); ret = -1; } else { for (ret = -1, i = 0; 1; i++) { DWORD subkey_size = sizeof(subkey), type, len = sizeof(value); if (RegEnumKeyExW(hKey, i, subkey, &subkey_size, NULL, NULL, NULL, NULL) != ERROR_SUCCESS) { break; } if (RegOpenKeyExW(hKey, subkey, 0, KEY_READ, &hSub) == ERROR_SUCCESS && (RegQueryValueExW(hSub, L"NameServer", 0, &type, (void *) value, &len) == ERROR_SUCCESS || RegQueryValueExW(hSub, L"DhcpNameServer", 0, &type, (void *) value, &len) == ERROR_SUCCESS)) { /* * See https://github.com/cesanta/mongoose/issues/176 * The value taken from the registry can be empty, a single * IP address, or multiple IP addresses separated by comma. * If it's empty, check the next interface. * If it's multiple IP addresses, take the first one. 
*/ wchar_t *comma = wcschr(value, ','); if (value[0] == '\0') { continue; } if (comma != NULL) { *comma = '\0'; } /* %S will convert wchar_t -> char */ snprintf(name, name_len, "%S", value); ret = 0; RegCloseKey(hSub); break; } } RegCloseKey(hKey); } #elif MG_ENABLE_FILESYSTEM FILE *fp; char line[512]; if ((fp = mg_fopen("/etc/resolv.conf", "r")) == NULL) { ret = -1; } else { /* Try to figure out what nameserver to use */ for (ret = -1; fgets(line, sizeof(line), fp) != NULL;) { unsigned int a, b, c, d; if (sscanf(line, "nameserver %u.%u.%u.%u", &a, &b, &c, &d) == 4) { snprintf(name, name_len, "%u.%u.%u.%u", a, b, c, d); ret = 0; break; } } (void) fclose(fp); } #else snprintf(name, name_len, "%s", MG_DEFAULT_NAMESERVER); #endif /* _WIN32 */ return ret; } int mg_resolve_from_hosts_file(const char *name, union socket_address *usa) { #if MG_ENABLE_FILESYSTEM /* TODO(mkm) cache /etc/hosts */ FILE *fp; char line[1024]; char *p; char alias[256]; unsigned int a, b, c, d; int len = 0; if ((fp = mg_fopen("/etc/hosts", "r")) == NULL) { return -1; } for (; fgets(line, sizeof(line), fp) != NULL;) { if (line[0] == '#') continue; if (sscanf(line, "%u.%u.%u.%u%n", &a, &b, &c, &d, &len) == 0) { /* TODO(mkm): handle ipv6 */ continue; } for (p = line + len; sscanf(p, "%s%n", alias, &len) == 1; p += len) { if (strcmp(alias, name) == 0) { usa->sin.sin_addr.s_addr = htonl(a << 24 | b << 16 | c << 8 | d); fclose(fp); return 0; } } } fclose(fp); #else (void) name; (void) usa; #endif return -1; } static void mg_resolve_async_eh(struct mg_connection *nc, int ev, void *data MG_UD_ARG(void *user_data)) { time_t now = (time_t) mg_time(); struct mg_resolve_async_request *req; struct mg_dns_message *msg; int first = 0; #if !MG_ENABLE_CALLBACK_USERDATA void *user_data = nc->user_data; #endif if (ev != MG_EV_POLL) DBG(("ev=%d user_data=%p", ev, user_data)); req = (struct mg_resolve_async_request *) user_data; if (req == NULL) { return; } switch (ev) { case MG_EV_CONNECT: /* don't depend on timer 
not being at epoch for sending out first req */ first = 1; /* fallthrough */ case MG_EV_POLL: if (req->retries > req->max_retries) { req->err = MG_RESOLVE_EXCEEDED_RETRY_COUNT; nc->flags |= MG_F_CLOSE_IMMEDIATELY; break; } if (first || now - req->last_time >= req->timeout) { mg_send_dns_query(nc, req->name, req->query); req->last_time = now; req->retries++; } break; case MG_EV_RECV: msg = (struct mg_dns_message *) MG_MALLOC(sizeof(*msg)); if (mg_parse_dns(nc->recv_mbuf.buf, *(int *) data, msg) == 0 && msg->num_answers > 0) { req->callback(msg, req->data, MG_RESOLVE_OK); nc->user_data = NULL; MG_FREE(req); } else { req->err = MG_RESOLVE_NO_ANSWERS; } MG_FREE(msg); nc->flags |= MG_F_CLOSE_IMMEDIATELY; break; case MG_EV_SEND: /* * If a send error occurs, prevent closing of the connection by the core. * We will retry after timeout. */ nc->flags &= ~MG_F_CLOSE_IMMEDIATELY; mbuf_remove(&nc->send_mbuf, nc->send_mbuf.len); break; case MG_EV_TIMER: req->err = MG_RESOLVE_TIMEOUT; nc->flags |= MG_F_CLOSE_IMMEDIATELY; break; case MG_EV_CLOSE: /* If we got here with request still not done, fire an error callback. 
*/ if (req != NULL) { req->callback(NULL, req->data, req->err); nc->user_data = NULL; MG_FREE(req); } break; } } int mg_resolve_async(struct mg_mgr *mgr, const char *name, int query, mg_resolve_callback_t cb, void *data) { struct mg_resolve_async_opts opts; memset(&opts, 0, sizeof(opts)); return mg_resolve_async_opt(mgr, name, query, cb, data, opts); } int mg_resolve_async_opt(struct mg_mgr *mgr, const char *name, int query, mg_resolve_callback_t cb, void *data, struct mg_resolve_async_opts opts) { struct mg_resolve_async_request *req; struct mg_connection *dns_nc; const char *nameserver = opts.nameserver; char dns_server_buff[17], nameserver_url[26]; if (nameserver == NULL) { nameserver = mgr->nameserver; } DBG(("%s %d %p", name, query, opts.dns_conn)); /* resolve with DNS */ req = (struct mg_resolve_async_request *) MG_CALLOC(1, sizeof(*req)); if (req == NULL) { return -1; } strncpy(req->name, name, sizeof(req->name)); req->query = query; req->callback = cb; req->data = data; /* TODO(mkm): parse defaults out of resolve.conf */ req->max_retries = opts.max_retries ? opts.max_retries : 2; req->timeout = opts.timeout ? 
opts.timeout : 5; /* Lazily initialize dns server */ if (nameserver == NULL) { if (mg_get_ip_address_of_nameserver(dns_server_buff, sizeof(dns_server_buff)) != -1) { nameserver = dns_server_buff; } else { nameserver = MG_DEFAULT_NAMESERVER; } } snprintf(nameserver_url, sizeof(nameserver_url), "udp://%s:53", nameserver); dns_nc = mg_connect(mgr, nameserver_url, MG_CB(mg_resolve_async_eh, NULL)); if (dns_nc == NULL) { MG_FREE(req); return -1; } dns_nc->user_data = req; if (opts.dns_conn != NULL) { *opts.dns_conn = dns_nc; } return 0; } void mg_set_nameserver(struct mg_mgr *mgr, const char *nameserver) { MG_FREE((char *) mgr->nameserver); if (nameserver != NULL) { mgr->nameserver = strdup(nameserver); } } #endif /* MG_ENABLE_ASYNC_RESOLVER */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/coap.c" #endif /* * Copyright (c) 2015 Cesanta Software Limited * All rights reserved * This software is dual-licensed: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. For the terms of this * license, see <http://www.gnu.org/licenses/>. * * You are free to use this software under the terms of the GNU General * Public License, but WITHOUT ANY WARRANTY; without even the implied * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * Alternatively, you can license this software under a commercial * license, as set out in <https://www.cesanta.com/license>. 
*/ /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/coap.h" */ #if MG_ENABLE_COAP void mg_coap_free_options(struct mg_coap_message *cm) { while (cm->options != NULL) { struct mg_coap_option *next = cm->options->next; MG_FREE(cm->options); cm->options = next; } } struct mg_coap_option *mg_coap_add_option(struct mg_coap_message *cm, uint32_t number, char *value, size_t len) { struct mg_coap_option *new_option = (struct mg_coap_option *) MG_CALLOC(1, sizeof(*new_option)); new_option->number = number; new_option->value.p = value; new_option->value.len = len; if (cm->options == NULL) { cm->options = cm->optiomg_tail = new_option; } else { /* * A very simple attention to help clients to compose options: * CoAP wants to see options ASC ordered. * Could be change by using sort in coap_compose */ if (cm->optiomg_tail->number <= new_option->number) { /* if option is already ordered just add it */ cm->optiomg_tail = cm->optiomg_tail->next = new_option; } else { /* looking for appropriate position */ struct mg_coap_option *current_opt = cm->options; struct mg_coap_option *prev_opt = 0; while (current_opt != NULL) { if (current_opt->number > new_option->number) { break; } prev_opt = current_opt; current_opt = current_opt->next; } if (prev_opt != NULL) { prev_opt->next = new_option; new_option->next = current_opt; } else { /* insert new_option to the beginning */ new_option->next = cm->options; cm->options = new_option; } } } return new_option; } /* * Fills CoAP header in mg_coap_message. * * Helper function. */ static char *coap_parse_header(char *ptr, struct mbuf *io, struct mg_coap_message *cm) { if (io->len < sizeof(uint32_t)) { cm->flags |= MG_COAP_NOT_ENOUGH_DATA; return NULL; } /* * Version (Ver): 2-bit unsigned integer. Indicates the CoAP version * number. Implementations of this specification MUST set this field * to 1 (01 binary). Other values are reserved for future versions. 
* Messages with unknown version numbers MUST be silently ignored. */ if (((uint8_t) *ptr >> 6) != 1) { cm->flags |= MG_COAP_IGNORE; return NULL; } /* * Type (T): 2-bit unsigned integer. Indicates if this message is of * type Confirmable (0), Non-confirmable (1), Acknowledgement (2), or * Reset (3). */ cm->msg_type = ((uint8_t) *ptr & 0x30) >> 4; cm->flags |= MG_COAP_MSG_TYPE_FIELD; /* * Token Length (TKL): 4-bit unsigned integer. Indicates the length of * the variable-length Token field (0-8 bytes). Lengths 9-15 are * reserved, MUST NOT be sent, and MUST be processed as a message * format error. */ cm->token.len = *ptr & 0x0F; if (cm->token.len > 8) { cm->flags |= MG_COAP_FORMAT_ERROR; return NULL; } ptr++; /* * Code: 8-bit unsigned integer, split into a 3-bit class (most * significant bits) and a 5-bit detail (least significant bits) */ cm->code_class = (uint8_t) *ptr >> 5; cm->code_detail = *ptr & 0x1F; cm->flags |= (MG_COAP_CODE_CLASS_FIELD | MG_COAP_CODE_DETAIL_FIELD); ptr++; /* Message ID: 16-bit unsigned integer in network byte order. */ cm->msg_id = (uint8_t) *ptr << 8 | (uint8_t) * (ptr + 1); cm->flags |= MG_COAP_MSG_ID_FIELD; ptr += 2; return ptr; } /* * Fills token information in mg_coap_message. * * Helper function. */ static char *coap_get_token(char *ptr, struct mbuf *io, struct mg_coap_message *cm) { if (cm->token.len != 0) { if (ptr + cm->token.len > io->buf + io->len) { cm->flags |= MG_COAP_NOT_ENOUGH_DATA; return NULL; } else { cm->token.p = ptr; ptr += cm->token.len; cm->flags |= MG_COAP_TOKEN_FIELD; } } return ptr; } /* * Returns Option Delta or Length. * * Helper function. */ static int coap_get_ext_opt(char *ptr, struct mbuf *io, uint16_t *opt_info) { int ret = 0; if (*opt_info == 13) { /* * 13: An 8-bit unsigned integer follows the initial byte and * indicates the Option Delta/Length minus 13. 
*/ if (ptr < io->buf + io->len) { *opt_info = (uint8_t) *ptr + 13; ret = sizeof(uint8_t); } else { ret = -1; /* LCOV_EXCL_LINE */ } } else if (*opt_info == 14) { /* * 14: A 16-bit unsigned integer in network byte order follows the * initial byte and indicates the Option Delta/Length minus 269. */ if (ptr + sizeof(uint8_t) < io->buf + io->len) { *opt_info = ((uint8_t) *ptr << 8 | (uint8_t) * (ptr + 1)) + 269; ret = sizeof(uint16_t); } else { ret = -1; /* LCOV_EXCL_LINE */ } } return ret; } /* * Fills options in mg_coap_message. * * Helper function. * * General options format: * +---------------+---------------+ * | Option Delta | Option Length | 1 byte * +---------------+---------------+ * \ Option Delta (extended) \ 0-2 bytes * +-------------------------------+ * / Option Length (extended) \ 0-2 bytes * +-------------------------------+ * \ Option Value \ 0 or more bytes * +-------------------------------+ */ static char *coap_get_options(char *ptr, struct mbuf *io, struct mg_coap_message *cm) { uint16_t prev_opt = 0; if (ptr == io->buf + io->len) { /* end of packet, ok */ return NULL; } /* 0xFF is payload marker */ while (ptr < io->buf + io->len && (uint8_t) *ptr != 0xFF) { uint16_t option_delta, option_lenght; int optinfo_len; /* Option Delta: 4-bit unsigned integer */ option_delta = ((uint8_t) *ptr & 0xF0) >> 4; /* Option Length: 4-bit unsigned integer */ option_lenght = *ptr & 0x0F; if (option_delta == 15 || option_lenght == 15) { /* * 15: Reserved for future use. 
If the field is set to this value, * it MUST be processed as a message format error */ cm->flags |= MG_COAP_FORMAT_ERROR; break; } ptr++; /* check for extended option delta */ optinfo_len = coap_get_ext_opt(ptr, io, &option_delta); if (optinfo_len == -1) { cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */ break; /* LCOV_EXCL_LINE */ } ptr += optinfo_len; /* check or extended option lenght */ optinfo_len = coap_get_ext_opt(ptr, io, &option_lenght); if (optinfo_len == -1) { cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */ break; /* LCOV_EXCL_LINE */ } ptr += optinfo_len; /* * Instead of specifying the Option Number directly, the instances MUST * appear in order of their Option Numbers and a delta encoding is used * between them. */ option_delta += prev_opt; mg_coap_add_option(cm, option_delta, ptr, option_lenght); prev_opt = option_delta; if (ptr + option_lenght > io->buf + io->len) { cm->flags |= MG_COAP_NOT_ENOUGH_DATA; /* LCOV_EXCL_LINE */ break; /* LCOV_EXCL_LINE */ } ptr += option_lenght; } if ((cm->flags & MG_COAP_ERROR) != 0) { mg_coap_free_options(cm); return NULL; } cm->flags |= MG_COAP_OPTIOMG_FIELD; if (ptr == io->buf + io->len) { /* end of packet, ok */ return NULL; } ptr++; return ptr; } uint32_t mg_coap_parse(struct mbuf *io, struct mg_coap_message *cm) { char *ptr; memset(cm, 0, sizeof(*cm)); if ((ptr = coap_parse_header(io->buf, io, cm)) == NULL) { return cm->flags; } if ((ptr = coap_get_token(ptr, io, cm)) == NULL) { return cm->flags; } if ((ptr = coap_get_options(ptr, io, cm)) == NULL) { return cm->flags; } /* the rest is payload */ cm->payload.len = io->len - (ptr - io->buf); if (cm->payload.len != 0) { cm->payload.p = ptr; cm->flags |= MG_COAP_PAYLOAD_FIELD; } return cm->flags; } /* * Calculates extended size of given Opt Number/Length in coap message. * * Helper function. 
*/ static size_t coap_get_ext_opt_size(uint32_t value) { int ret = 0; if (value >= 13 && value <= 0xFF + 13) { ret = sizeof(uint8_t); } else if (value > 0xFF + 13 && value <= 0xFFFF + 269) { ret = sizeof(uint16_t); } return ret; } /* * Splits given Opt Number/Length into base and ext values. * * Helper function. */ static int coap_split_opt(uint32_t value, uint8_t *base, uint16_t *ext) { int ret = 0; if (value < 13) { *base = value; } else if (value >= 13 && value <= 0xFF + 13) { *base = 13; *ext = value - 13; ret = sizeof(uint8_t); } else if (value > 0xFF + 13 && value <= 0xFFFF + 269) { *base = 14; *ext = value - 269; ret = sizeof(uint16_t); } return ret; } /* * Puts uint16_t (in network order) into given char stream. * * Helper function. */ static char *coap_add_uint16(char *ptr, uint16_t val) { *ptr = val >> 8; ptr++; *ptr = val & 0x00FF; ptr++; return ptr; } /* * Puts extended value of Opt Number/Length into given char stream. * * Helper function. */ static char *coap_add_opt_info(char *ptr, uint16_t val, size_t len) { if (len == sizeof(uint8_t)) { *ptr = (char) val; ptr++; } else if (len == sizeof(uint16_t)) { ptr = coap_add_uint16(ptr, val); } return ptr; } /* * Verifies given mg_coap_message and calculates message size for it. * * Helper function. */ static uint32_t coap_calculate_packet_size(struct mg_coap_message *cm, size_t *len) { struct mg_coap_option *opt; uint32_t prev_opt_number; *len = 4; /* header */ if (cm->msg_type > MG_COAP_MSG_MAX) { return MG_COAP_ERROR | MG_COAP_MSG_TYPE_FIELD; } if (cm->token.len > 8) { return MG_COAP_ERROR | MG_COAP_TOKEN_FIELD; } if (cm->code_class > 7) { return MG_COAP_ERROR | MG_COAP_CODE_CLASS_FIELD; } if (cm->code_detail > 31) { return MG_COAP_ERROR | MG_COAP_CODE_DETAIL_FIELD; } *len += cm->token.len; if (cm->payload.len != 0) { *len += cm->payload.len + 1; /* ... 
+ 1; add payload marker */ } opt = cm->options; prev_opt_number = 0; while (opt != NULL) { *len += 1; /* basic delta/length */ *len += coap_get_ext_opt_size(opt->number - prev_opt_number); *len += coap_get_ext_opt_size((uint32_t) opt->value.len); /* * Current implementation performs check if * option_number > previous option_number and produces an error * TODO(alashkin): write design doc with limitations * May be resorting is more suitable solution. */ if ((opt->next != NULL && opt->number > opt->next->number) || opt->value.len > 0xFFFF + 269 || opt->number - prev_opt_number > 0xFFFF + 269) { return MG_COAP_ERROR | MG_COAP_OPTIOMG_FIELD; } *len += opt->value.len; prev_opt_number = opt->number; opt = opt->next; } return 0; } uint32_t mg_coap_compose(struct mg_coap_message *cm, struct mbuf *io) { struct mg_coap_option *opt; uint32_t res, prev_opt_number; size_t prev_io_len, packet_size; char *ptr; res = coap_calculate_packet_size(cm, &packet_size); if (res != 0) { return res; } /* saving previous lenght to handle non-empty mbuf */ prev_io_len = io->len; mbuf_append(io, NULL, packet_size); ptr = io->buf + prev_io_len; /* * since cm is verified, it is possible to use bits shift operator * without additional zeroing of unused bits */ /* ver: 2 bits, msg_type: 2 bits, toklen: 4 bits */ *ptr = (1 << 6) | (cm->msg_type << 4) | (uint8_t)(cm->token.len); ptr++; /* code class: 3 bits, code detail: 5 bits */ *ptr = (cm->code_class << 5) | (cm->code_detail); ptr++; ptr = coap_add_uint16(ptr, cm->msg_id); if (cm->token.len != 0) { memcpy(ptr, cm->token.p, cm->token.len); ptr += cm->token.len; } opt = cm->options; prev_opt_number = 0; while (opt != NULL) { uint8_t delta_base = 0, length_base = 0; uint16_t delta_ext = 0, length_ext = 0; size_t opt_delta_len = coap_split_opt(opt->number - prev_opt_number, &delta_base, &delta_ext); size_t opt_lenght_len = coap_split_opt((uint32_t) opt->value.len, &length_base, &length_ext); *ptr = (delta_base << 4) | length_base; ptr++; ptr = 
coap_add_opt_info(ptr, delta_ext, opt_delta_len); ptr = coap_add_opt_info(ptr, length_ext, opt_lenght_len); if (opt->value.len != 0) { memcpy(ptr, opt->value.p, opt->value.len); ptr += opt->value.len; } prev_opt_number = opt->number; opt = opt->next; } if (cm->payload.len != 0) { *ptr = (char) -1; ptr++; memcpy(ptr, cm->payload.p, cm->payload.len); } return 0; } uint32_t mg_coap_send_message(struct mg_connection *nc, struct mg_coap_message *cm) { struct mbuf packet_out; uint32_t compose_res; mbuf_init(&packet_out, 0); compose_res = mg_coap_compose(cm, &packet_out); if (compose_res != 0) { return compose_res; /* LCOV_EXCL_LINE */ } mg_send(nc, packet_out.buf, (int) packet_out.len); mbuf_free(&packet_out); return 0; } uint32_t mg_coap_send_ack(struct mg_connection *nc, uint16_t msg_id) { struct mg_coap_message cm; memset(&cm, 0, sizeof(cm)); cm.msg_type = MG_COAP_MSG_ACK; cm.msg_id = msg_id; return mg_coap_send_message(nc, &cm); } static void coap_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { struct mbuf *io = &nc->recv_mbuf; struct mg_coap_message cm; uint32_t parse_res; memset(&cm, 0, sizeof(cm)); nc->handler(nc, ev, ev_data MG_UD_ARG(user_data)); switch (ev) { case MG_EV_RECV: parse_res = mg_coap_parse(io, &cm); if ((parse_res & MG_COAP_IGNORE) == 0) { if ((cm.flags & MG_COAP_NOT_ENOUGH_DATA) != 0) { /* * Since we support UDP only * MG_COAP_NOT_ENOUGH_DATA == MG_COAP_FORMAT_ERROR */ cm.flags |= MG_COAP_FORMAT_ERROR; /* LCOV_EXCL_LINE */ } /* LCOV_EXCL_LINE */ nc->handler(nc, MG_COAP_EVENT_BASE + cm.msg_type, &cm MG_UD_ARG(user_data)); } mg_coap_free_options(&cm); mbuf_remove(io, io->len); break; } } /* * Attach built-in CoAP event handler to the given connection. 
* * The user-defined event handler will receive following extra events: * * - MG_EV_COAP_CON * - MG_EV_COAP_NOC * - MG_EV_COAP_ACK * - MG_EV_COAP_RST */ int mg_set_protocol_coap(struct mg_connection *nc) { /* supports UDP only */ if ((nc->flags & MG_F_UDP) == 0) { return -1; } nc->proto_handler = coap_handler; return 0; } #endif /* MG_ENABLE_COAP */ #ifdef MG_MODULE_LINES #line 1 "mongoose/src/tun.c" #endif /* * Copyright (c) 2014 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_TUN /* Amalgamated: #include "common/cs_dbg.h" */ /* Amalgamated: #include "mongoose/src/http.h" */ /* Amalgamated: #include "mongoose/src/internal.h" */ /* Amalgamated: #include "mongoose/src/net.h" */ /* Amalgamated: #include "mongoose/src/net_if_tun.h" */ /* Amalgamated: #include "mongoose/src/tun.h" */ /* Amalgamated: #include "mongoose/src/util.h" */ static void mg_tun_reconnect(struct mg_tun_client *client, int timeout); static void mg_tun_init_client(struct mg_tun_client *client, struct mg_mgr *mgr, struct mg_iface *iface, const char *dispatcher, struct mg_tun_ssl_opts ssl) { client->mgr = mgr; client->iface = iface; client->disp_url = dispatcher; client->last_stream_id = 0; client->ssl = ssl; client->disp = NULL; /* will be set by mg_tun_reconnect */ client->listener = NULL; /* will be set by mg_do_bind */ client->reconnect = NULL; /* will be set by mg_tun_reconnect */ } void mg_tun_log_frame(struct mg_tun_frame *frame) { LOG(LL_DEBUG, ("Got TUN frame: type=0x%x, flags=0x%x stream_id=0x%lx, " "len=%zu", frame->type, frame->flags, frame->stream_id, frame->body.len)); #if MG_ENABLE_HEXDUMP { char hex[512]; mg_hexdump(frame->body.p, frame->body.len, hex, sizeof(hex) - 1); hex[sizeof(hex) - 1] = '\0'; LOG(LL_DEBUG, ("body:\n%s", hex)); } #else LOG(LL_DEBUG, ("body: '%.*s'", (int) frame->body.len, frame->body.p)); #endif } static void mg_tun_close_all(struct mg_tun_client *client) { struct mg_connection *nc; for (nc = client->mgr->active_connections; nc != NULL; nc = 
nc->next) { if (nc->iface == client->iface && !(nc->flags & MG_F_LISTENING)) { LOG(LL_DEBUG, ("Closing tunneled connection %p", nc)); nc->flags |= MG_F_CLOSE_IMMEDIATELY; /* mg_close_conn(nc); */ } } } static void mg_tun_client_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { #if !MG_ENABLE_CALLBACK_USERDATA void *user_data = nc->user_data; #else (void) nc; #endif struct mg_tun_client *client = (struct mg_tun_client *) user_data; switch (ev) { case MG_EV_CONNECT: { int err = *(int *) ev_data; if (err) { LOG(LL_ERROR, ("Cannot connect to the tunnel dispatcher: %d", err)); } else { LOG(LL_INFO, ("Connected to the tunnel dispatcher")); } break; } case MG_EV_HTTP_REPLY: { struct http_message *hm = (struct http_message *) ev_data; if (hm->resp_code != 200) { LOG(LL_ERROR, ("Tunnel dispatcher reply non-OK status code %d", hm->resp_code)); } break; } case MG_EV_WEBSOCKET_HANDSHAKE_DONE: { LOG(LL_INFO, ("Tunnel dispatcher handshake done")); break; } case MG_EV_WEBSOCKET_FRAME: { struct websocket_message *wm = (struct websocket_message *) ev_data; struct mg_connection *tc; struct mg_tun_frame frame; if (mg_tun_parse_frame(wm->data, wm->size, &frame) == -1) { LOG(LL_ERROR, ("Got invalid tun frame dropping", wm->size)); break; } mg_tun_log_frame(&frame); tc = mg_tun_if_find_conn(client, frame.stream_id); if (tc == NULL) { if (frame.body.len > 0) { LOG(LL_DEBUG, ("Got frame after receiving end has been closed")); } break; } if (frame.body.len > 0) { mg_if_recv_tcp_cb(tc, (void *) frame.body.p, frame.body.len, 0 /* own */); } if (frame.flags & MG_TUN_F_END_STREAM) { LOG(LL_DEBUG, ("Closing tunneled connection because got end of stream " "from other end")); tc->flags |= MG_F_CLOSE_IMMEDIATELY; mg_close_conn(tc); } break; } case MG_EV_CLOSE: { LOG(LL_DEBUG, ("Closing all tunneled connections")); /* * The client might have been already freed when the listening socket is * closed. 
*/ if (client != NULL) { mg_tun_close_all(client); client->disp = NULL; LOG(LL_INFO, ("Dispatcher connection is no more, reconnecting")); /* TODO(mkm): implement exp back off */ mg_tun_reconnect(client, MG_TUN_RECONNECT_INTERVAL); } break; } default: break; } } static void mg_tun_do_reconnect(struct mg_tun_client *client) { struct mg_connection *dc; struct mg_connect_opts opts; memset(&opts, 0, sizeof(opts)); #if MG_ENABLE_SSL opts.ssl_cert = client->ssl.ssl_cert; opts.ssl_key = client->ssl.ssl_key; opts.ssl_ca_cert = client->ssl.ssl_ca_cert; #endif /* HTTP/Websocket listener */ if ((dc = mg_connect_ws_opt(client->mgr, MG_CB(mg_tun_client_handler, client), opts, client->disp_url, MG_TUN_PROTO_NAME, NULL)) == NULL) { LOG(LL_ERROR, ("Cannot connect to WS server on addr [%s]\n", client->disp_url)); return; } client->disp = dc; #if !MG_ENABLE_CALLBACK_USERDATA dc->user_data = client; #endif } void mg_tun_reconnect_ev_handler(struct mg_connection *nc, int ev, void *ev_data MG_UD_ARG(void *user_data)) { #if !MG_ENABLE_CALLBACK_USERDATA void *user_data = nc->user_data; #else (void) nc; #endif struct mg_tun_client *client = (struct mg_tun_client *) user_data; (void) ev_data; switch (ev) { case MG_EV_TIMER: if (!(client->listener->flags & MG_F_TUN_DO_NOT_RECONNECT)) { mg_tun_do_reconnect(client); } else { /* Reconnecting is suppressed, we'll check again at the next poll */ mg_tun_reconnect(client, 0); } break; } } static void mg_tun_reconnect(struct mg_tun_client *client, int timeout) { if (client->reconnect == NULL) { client->reconnect = mg_add_sock(client->mgr, INVALID_SOCKET, MG_CB(mg_tun_reconnect_ev_handler, client)); #if !MG_ENABLE_CALLBACK_USERDATA client->reconnect->user_data = client; #endif } client->reconnect->ev_timer_time = mg_time() + timeout; } static struct mg_tun_client *mg_tun_create_client(struct mg_mgr *mgr, const char *dispatcher, struct mg_tun_ssl_opts ssl) { struct mg_tun_client *client = NULL; struct mg_iface *iface = mg_find_iface(mgr, 
&mg_tun_iface_vtable, NULL); if (iface == NULL) { LOG(LL_ERROR, ("The tun feature requires the manager to have a tun " "interface enabled")); return NULL; } client = (struct mg_tun_client *) MG_MALLOC(sizeof(*client)); mg_tun_init_client(client, mgr, iface, dispatcher, ssl); iface->data = client; /* * We need to give application a chance to set MG_F_TUN_DO_NOT_RECONNECT on a * listening connection right after mg_tun_bind_opt() returned it, so we * should use mg_tun_reconnect() here, instead of mg_tun_do_reconnect() */ mg_tun_reconnect(client, 0); return client; } void mg_tun_destroy_client(struct mg_tun_client *client) { /* * NOTE: * `client` is NULL in case of OOM * `client->disp` is NULL if connection failed * `client->iface is NULL is `mg_find_iface` failed */ if (client != NULL && client->disp != NULL) { /* the dispatcher connection handler will in turn close all tunnels */ client->disp->flags |= MG_F_CLOSE_IMMEDIATELY; /* this is used as a signal to other tun handlers that the party is over */ client->disp->user_data = NULL; } if (client != NULL && client->reconnect != NULL) { client->reconnect->flags |= MG_F_CLOSE_IMMEDIATELY; } if (client != NULL && client->iface != NULL) { client->iface->data = NULL; } MG_FREE(client); } static struct mg_connection *mg_tun_do_bind(struct mg_tun_client *client, MG_CB(mg_event_handler_t handler, void *user_data), struct mg_bind_opts opts) { struct mg_connection *lc; opts.iface = client->iface; lc = mg_bind_opt(client->mgr, ":1234" /* dummy port */, MG_CB(handler, user_data), opts); client->listener = lc; return lc; } struct mg_connection *mg_tun_bind_opt(struct mg_mgr *mgr, const char *dispatcher, MG_CB(mg_event_handler_t handler, void *user_data), struct mg_bind_opts opts) { #if MG_ENABLE_SSL struct mg_tun_ssl_opts ssl = {opts.ssl_cert, opts.ssl_key, opts.ssl_ca_cert}; #else struct mg_tun_ssl_opts ssl = {0}; #endif struct mg_tun_client *client = mg_tun_create_client(mgr, dispatcher, ssl); if (client == NULL) { return NULL; } 
#if MG_ENABLE_SSL
  /* these options don't make sense in the local mouth of the tunnel */
  opts.ssl_cert = NULL;
  opts.ssl_key = NULL;
  opts.ssl_ca_cert = NULL;
#endif
  return mg_tun_do_bind(client, MG_CB(handler, user_data), opts);
}

/*
 * Parses a tun frame: 1 byte type, 1 byte flags, 4 bytes stream id
 * (network order), then the payload. Returns 0 on success, -1 if the
 * buffer is shorter than the fixed header. `frame->body` points into
 * `data` (no copy).
 */
int mg_tun_parse_frame(void *data, size_t len, struct mg_tun_frame *frame) {
  const size_t header_size = sizeof(uint32_t) + sizeof(uint8_t) * 2;
  if (len < header_size) {
    return -1;
  }

  frame->type = *(uint8_t *) (data);
  frame->flags = *(uint8_t *) ((char *) data + 1);
  /* memcpy avoids an unaligned 32-bit load from the raw buffer. */
  memcpy(&frame->stream_id, (char *) data + 2, sizeof(uint32_t));
  frame->stream_id = ntohl(frame->stream_id);
  frame->body.p = (char *) data + header_size;
  frame->body.len = len - header_size;
  return 0;
}

/* Sends one tun frame (header + payload) as a single binary WebSocket frame. */
void mg_tun_send_frame(struct mg_connection *ws, uint32_t stream_id,
                       uint8_t type, uint8_t flags, struct mg_str msg) {
  stream_id = htonl(stream_id);
  {
    struct mg_str parts[] = {
        {(char *) &type, sizeof(type)},
        {(char *) &flags, sizeof(flags)},
        {(char *) &stream_id, sizeof(stream_id)},
        {msg.p, msg.len} /* vc6 doesn't like just `msg` here */};
    mg_send_websocket_framev(ws, WEBSOCKET_OP_BINARY, parts,
                             sizeof(parts) / sizeof(parts[0]));
  }
}

#endif /* MG_ENABLE_TUN */
#ifdef MG_MODULE_LINES
#line 1 "mongoose/src/sntp.c"
#endif
/*
 * Copyright (c) 2016 Cesanta Software Limited
 * All rights reserved
 */

/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/sntp.h" */
/* Amalgamated: #include "mongoose/src/util.h" */

#if MG_ENABLE_SNTP

/* Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01) */
#define SNTP_TIME_OFFSET 2208988800

/* Seconds to wait between retransmissions of the SNTP request */
#ifndef SNTP_TIMEOUT
#define SNTP_TIMEOUT 10
#endif

#ifndef SNTP_ATTEMPTS
#define SNTP_ATTEMPTS 3
#endif

/* Upper 32 bits of a 64-bit NTP timestamp: whole seconds. */
static uint64_t mg_get_sec(uint64_t val) {
  return (val & 0xFFFFFFFF00000000) >> 32;
}

/* Lower 32 bits of a 64-bit NTP timestamp: binary fraction of a second,
 * converted to microseconds. */
static uint64_t mg_get_usec(uint64_t val) {
  uint64_t tmp = (val & 0x00000000FFFFFFFF);
  tmp *= 1000000;
  tmp >>= 32;
  return tmp;
}

/* Converts an NTP timestamp to a Unix-epoch struct timeval. */
static void mg_ntp_to_tv(uint64_t val, struct timeval *tv) {
  uint64_t tmp;
  tmp = mg_get_sec(val);
  tmp -= SNTP_TIME_OFFSET;
  tv->tv_sec = tmp;
  tv->tv_usec = mg_get_usec(val);
}

/* Reads a 64-bit big-endian NTP timestamp from the wire into *val. */
static void mg_get_ntp_ts(const char *ntp, uint64_t *val) {
  uint32_t tmp;
  memcpy(&tmp, ntp, sizeof(tmp));
  tmp = ntohl(tmp);
  *val = (uint64_t) tmp << 32;
  memcpy(&tmp, ntp + 4, sizeof(tmp));
  tmp = ntohl(tmp);
  *val |= tmp;
}

/* Builds and sends one 48-byte SNTP client request on connection `c`. */
void mg_sntp_send_request(struct mg_connection *c) {
  char buf[48] = {0};
  /*
   * header - 8 bit:
   * LI (2 bit) - 3 (not in sync), VN (3 bit) - 4 (version),
   * mode (3 bit) - 3 (client)
   */
  buf[0] = (3 << 6) | (4 << 3) | 3;
  /*
   * Next fields should be empty in client request
   * stratum, 8 bit
   * poll interval, 8 bit
   * precision, 8 bit
   * root delay, 32 bit
   * root dispersion, 32 bit
   * ref id, 32 bit
   * ref timestamp, 64 bit
   * originate Timestamp, 64 bit
   * receive Timestamp, 64 bit
   */

  /*
   * convert time to sntp format (sntp starts from 00:00:00 01.01.1900)
   * according to rfc868 it is 2208988800L sec
   * this information is used to correct roundtrip delay
   * but if local clock is absolutely broken (and doesn't work even
   * as simple timer), it is better to disable it
   */
#ifndef MG_SNMP_NO_DELAY_CORRECTION
  uint32_t sec;
  sec = htonl((uint32_t)(mg_time() + SNTP_TIME_OFFSET));
  /* Transmit timestamp field at offset 40 (seconds only, no fraction). */
  memcpy(&buf[40], &sec, sizeof(sec));
#endif

  mg_send(c, buf, sizeof(buf));
}

#ifndef MG_SNMP_NO_DELAY_CORRECTION
/* Round-trip delay in microseconds: T1=originate, T2=server receive,
 * T3=server transmit; T4 is "now" per the local clock. */
static uint64_t mg_calculate_delay(uint64_t t1, uint64_t t2, uint64_t t3) {
  /* roundloop delay = (T4 - T1) - (T3 - T2) */
  uint64_t d1 = ((mg_time() + SNTP_TIME_OFFSET) * 1000000) -
                (mg_get_sec(t1) * 1000000 + mg_get_usec(t1));
  uint64_t d2 = (mg_get_sec(t3) * 1000000 + mg_get_usec(t3)) -
                (mg_get_sec(t2) * 1000000 + mg_get_usec(t2));
  return (d1 > d2) ? d1 - d2 : 0;
}
#endif

/*
 * Validates and parses a server reply into *msg.
 * Returns 0 on success, -1 if the packet is too short, has a wrong
 * version, or is not a server/broadcast reply. On success msg->time is
 * the server transmit time (Unix seconds, with half the round-trip delay
 * folded in unless MG_SNMP_NO_DELAY_CORRECTION is defined).
 */
MG_INTERNAL int mg_sntp_parse_reply(const char *buf, int len,
                                    struct mg_sntp_message *msg) {
  uint8_t hdr;
  uint64_t orig_ts_T1, recv_ts_T2, trsm_ts_T3, delay = 0;
  int mode;
  struct timeval tv;

  (void) orig_ts_T1;
  (void) recv_ts_T2;

  if (len < 48) {
    return -1;
  }

  hdr = buf[0];

  if ((hdr & 0x38) >> 3 != 4) {
    /* Wrong version */
    return -1;
  }

  mode = hdr & 0x7;
  if (mode != 4 && mode != 5) {
    /* Not a server reply */
    return -1;
  }

  memset(msg, 0, sizeof(*msg));

  /* buf[1] is the stratum field; stratum 0 is a kiss-o'-death packet. */
  msg->kiss_of_death = (buf[1] == 0); /* Server asks to not send requests */

  mg_get_ntp_ts(&buf[40], &trsm_ts_T3);

#ifndef MG_SNMP_NO_DELAY_CORRECTION
  mg_get_ntp_ts(&buf[24], &orig_ts_T1);
  mg_get_ntp_ts(&buf[32], &recv_ts_T2);
  delay = mg_calculate_delay(orig_ts_T1, recv_ts_T2, trsm_ts_T3);
#endif

  mg_ntp_to_tv(trsm_ts_T3, &tv);

  msg->time = (double) tv.tv_sec + (((double) tv.tv_usec + delay) / 1000000.0);

  return 0;
}

/* Protocol handler: parses incoming UDP datagrams and re-raises them as
 * MG_SNTP_REPLY / MG_SNTP_MALFORMED_REPLY to the user handler. */
static void mg_sntp_handler(struct mg_connection *c, int ev,
                            void *ev_data MG_UD_ARG(void *user_data)) {
  struct mbuf *io = &c->recv_mbuf;
  struct mg_sntp_message msg;

  c->handler(c, ev, ev_data MG_UD_ARG(user_data));

  switch (ev) {
    case MG_EV_RECV: {
      if (mg_sntp_parse_reply(io->buf, io->len, &msg) < 0) {
        DBG(("Invalid SNTP packet received (%d)", (int) io->len));
        c->handler(c, MG_SNTP_MALFORMED_REPLY, NULL MG_UD_ARG(user_data));
      } else {
        c->handler(c, MG_SNTP_REPLY, (void *) &msg MG_UD_ARG(user_data));
      }
      mbuf_remove(io, io->len);
      break;
    }
  }
}

/* Installs the SNTP protocol handler; only valid on UDP connections. */
int mg_set_protocol_sntp(struct mg_connection *c) {
  if ((c->flags & MG_F_UDP) == 0) {
    return -1;
  }

  c->proto_handler = mg_sntp_handler;

  return 0;
}

/*
 * Connects to `sntp_server_name`, normalizing it to a full
 * "udp://host:123" URL first. Returns the connection or NULL.
 */
struct mg_connection *mg_sntp_connect(struct mg_mgr *mgr,
                                      MG_CB(mg_event_handler_t event_handler,
                                            void *user_data),
                                      const char *sntp_server_name) {
  struct mg_connection *c = NULL;
  char url[100], *p_url = url;
  const char *proto = "", *port = "", *tmp;

  /* If port is not specified, use default (123) */
  tmp = strchr(sntp_server_name, ':');
  if (tmp != NULL && *(tmp + 1) == '/') {
    /* Skip the "://" of a scheme prefix and look for a real port colon. */
    tmp = strchr(tmp + 1, ':');
  }

  if (tmp
== NULL) { port = ":123"; } /* Add udp:// if needed */ if (strncmp(sntp_server_name, "udp://", 6) != 0) { proto = "udp://"; } mg_asprintf(&p_url, sizeof(url), "%s%s%s", proto, sntp_server_name, port); c = mg_connect(mgr, p_url, event_handler MG_UD_ARG(user_data)); if (c == NULL) { goto cleanup; } mg_set_protocol_sntp(c); cleanup: if (p_url != url) { MG_FREE(p_url); } return c; } struct sntp_data { mg_event_handler_t hander; int count; }; static void mg_sntp_util_ev_handler(struct mg_connection *c, int ev, void *ev_data MG_UD_ARG(void *user_data)) { #if !MG_ENABLE_CALLBACK_USERDATA void *user_data = c->user_data; #endif struct sntp_data *sd = (struct sntp_data *) user_data; switch (ev) { case MG_EV_CONNECT: if (*(int *) ev_data != 0) { mg_call(c, sd->hander, c->user_data, MG_SNTP_FAILED, NULL); break; } /* fallthrough */ case MG_EV_TIMER: if (sd->count <= SNTP_ATTEMPTS) { mg_sntp_send_request(c); mg_set_timer(c, mg_time() + 10); sd->count++; } else { mg_call(c, sd->hander, c->user_data, MG_SNTP_FAILED, NULL); c->flags |= MG_F_CLOSE_IMMEDIATELY; } break; case MG_SNTP_MALFORMED_REPLY: mg_call(c, sd->hander, c->user_data, MG_SNTP_FAILED, NULL); c->flags |= MG_F_CLOSE_IMMEDIATELY; break; case MG_SNTP_REPLY: mg_call(c, sd->hander, c->user_data, MG_SNTP_REPLY, ev_data); c->flags |= MG_F_CLOSE_IMMEDIATELY; break; case MG_EV_CLOSE: MG_FREE(user_data); c->user_data = NULL; break; } } struct mg_connection *mg_sntp_get_time(struct mg_mgr *mgr, mg_event_handler_t event_handler, const char *sntp_server_name) { struct mg_connection *c; struct sntp_data *sd = (struct sntp_data *) MG_CALLOC(1, sizeof(*sd)); if (sd == NULL) { return NULL; } c = mg_sntp_connect(mgr, MG_CB(mg_sntp_util_ev_handler, sd), sntp_server_name); if (c == NULL) { MG_FREE(sd); return NULL; } sd->hander = event_handler; #if !MG_ENABLE_CALLBACK_USERDATA c->user_data = sd; #endif return c; } #endif /* MG_ENABLE_SNTP */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/cc3200/cc3200_libc.c" #endif /* * Copyright (c) 
2014-2016 Cesanta Software Limited * All rights reserved */ #if CS_PLATFORM == CS_P_CC3200 /* Amalgamated: #include "common/mg_mem.h" */ #include <stdio.h> #include <string.h> #ifndef __TI_COMPILER_VERSION__ #include <reent.h> #include <sys/stat.h> #include <sys/time.h> #include <unistd.h> #endif #include <inc/hw_types.h> #include <inc/hw_memmap.h> #include <driverlib/prcm.h> #include <driverlib/rom.h> #include <driverlib/rom_map.h> #include <driverlib/uart.h> #include <driverlib/utils.h> #define CONSOLE_UART UARTA0_BASE #ifdef __TI_COMPILER_VERSION__ int asprintf(char **strp, const char *fmt, ...) { va_list ap; int len; *strp = MG_MALLOC(BUFSIZ); if (*strp == NULL) return -1; va_start(ap, fmt); len = vsnprintf(*strp, BUFSIZ, fmt, ap); va_end(ap); if (len > 0) { *strp = MG_REALLOC(*strp, len + 1); if (*strp == NULL) return -1; } if (len >= BUFSIZ) { va_start(ap, fmt); len = vsnprintf(*strp, len + 1, fmt, ap); va_end(ap); } return len; } #if MG_TI_NO_HOST_INTERFACE time_t HOSTtime() { struct timeval tp; gettimeofday(&tp, NULL); return tp.tv_sec; } #endif #endif /* __TI_COMPILER_VERSION__ */ #ifndef __TI_COMPILER_VERSION__ int _gettimeofday_r(struct _reent *r, struct timeval *tp, void *tzp) { #else int gettimeofday(struct timeval *tp, void *tzp) { #endif unsigned long long r1 = 0, r2; /* Achieve two consecutive reads of the same value. */ do { r2 = r1; r1 = PRCMSlowClkCtrFastGet(); } while (r1 != r2); /* This is a 32768 Hz counter. */ tp->tv_sec = (r1 >> 15); /* 1/32768-th of a second is 30.517578125 microseconds, approx. 
31, * but we round down so it doesn't overflow at 32767 */ tp->tv_usec = (r1 & 0x7FFF) * 30; return 0; } void fprint_str(FILE *fp, const char *str) { while (*str != '\0') { if (*str == '\n') MAP_UARTCharPut(CONSOLE_UART, '\r'); MAP_UARTCharPut(CONSOLE_UART, *str++); } } void _exit(int status) { fprint_str(stderr, "_exit\n"); /* cause an unaligned access exception, that will drop you into gdb */ *(int *) 1 = status; while (1) ; /* avoid gcc warning because stdlib abort() has noreturn attribute */ } void _not_implemented(const char *what) { fprint_str(stderr, what); fprint_str(stderr, " is not implemented\n"); _exit(42); } int _kill(int pid, int sig) { (void) pid; (void) sig; _not_implemented("_kill"); return -1; } int _getpid() { fprint_str(stderr, "_getpid is not implemented\n"); return 42; } int _isatty(int fd) { /* 0, 1 and 2 are TTYs. */ return fd < 2; } #endif /* CS_PLATFORM == CS_P_CC3200 */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/msp432/msp432_libc.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if CS_PLATFORM == CS_P_MSP432 #include <ti/sysbios/BIOS.h> #include <ti/sysbios/knl/Clock.h> int gettimeofday(struct timeval *tp, void *tzp) { uint32_t ticks = Clock_getTicks(); tp->tv_sec = ticks / 1000; tp->tv_usec = (ticks % 1000) * 1000; return 0; } #endif /* CS_PLATFORM == CS_P_MSP432 */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/nrf5/nrf5_libc.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if (CS_PLATFORM == CS_P_NRF51 || CS_PLATFORM == CS_P_NRF52) && \ defined(__ARMCC_VERSION) int gettimeofday(struct timeval *tp, void *tzp) { /* TODO */ tp->tv_sec = 0; tp->tv_usec = 0; return 0; } #endif #ifdef MG_MODULE_LINES #line 1 "common/platforms/simplelink/sl_fs_slfs.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_ #define CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_ #if 
defined(MG_FS_SLFS) #include <stdio.h> #ifndef __TI_COMPILER_VERSION__ #include <unistd.h> #include <sys/stat.h> #endif #define MAX_OPEN_SLFS_FILES 8 /* Indirect libc interface - same functions, different names. */ int fs_slfs_open(const char *pathname, int flags, mode_t mode); int fs_slfs_close(int fd); ssize_t fs_slfs_read(int fd, void *buf, size_t count); ssize_t fs_slfs_write(int fd, const void *buf, size_t count); int fs_slfs_stat(const char *pathname, struct stat *s); int fs_slfs_fstat(int fd, struct stat *s); off_t fs_slfs_lseek(int fd, off_t offset, int whence); int fs_slfs_unlink(const char *filename); int fs_slfs_rename(const char *from, const char *to); void fs_slfs_set_new_file_size(const char *name, size_t size); #endif /* defined(MG_FS_SLFS) */ #endif /* CS_COMMON_PLATFORMS_SIMPLELINK_SL_FS_SLFS_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/simplelink/sl_fs_slfs.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ /* Standard libc interface to TI SimpleLink FS. */ #if defined(MG_FS_SLFS) || defined(CC3200_FS_SLFS) /* Amalgamated: #include "common/platforms/simplelink/sl_fs_slfs.h" */ #include <errno.h> #if CS_PLATFORM == CS_P_CC3200 #include <inc/hw_types.h> #endif #include <simplelink/include/simplelink.h> #include <simplelink/include/fs.h> /* Amalgamated: #include "common/cs_dbg.h" */ /* Amalgamated: #include "common/mg_mem.h" */ /* From sl_fs.c */ extern int set_errno(int e); static const char *drop_dir(const char *fname, bool *is_slfs); /* * With SLFS, you have to pre-declare max file size. Yes. Really. * 64K should be enough for everyone. Right? 
 */
#ifndef FS_SLFS_MAX_FILE_SIZE
#define FS_SLFS_MAX_FILE_SIZE (64 * 1024)
#endif

/* Pending "create with this max size" hint, consumed by fs_slfs_open(). */
struct sl_file_size_hint {
  char *name;
  size_t size;
};

/* Per-descriptor state: SL file handle, current position, cached size. */
struct sl_fd_info {
  _i32 fh;
  _off_t pos;
  size_t size;
};

static struct sl_fd_info s_sl_fds[MAX_OPEN_SLFS_FILES];
static struct sl_file_size_hint s_sl_file_size_hints[MAX_OPEN_SLFS_FILES];

/* Maps a SimpleLink FS status code to the closest errno value. */
static int sl_fs_to_errno(_i32 r) {
  DBG(("SL error: %d", (int) r));
  switch (r) {
    case SL_FS_OK:
      return 0;
    case SL_FS_FILE_NAME_EXIST:
      return EEXIST;
    case SL_FS_WRONG_FILE_NAME:
      return EINVAL;
    case SL_FS_ERR_NO_AVAILABLE_NV_INDEX:
    case SL_FS_ERR_NO_AVAILABLE_BLOCKS:
      return ENOSPC;
    case SL_FS_ERR_FAILED_TO_ALLOCATE_MEM:
      return ENOMEM;
    case SL_FS_ERR_FILE_NOT_EXISTS:
      return ENOENT;
    case SL_FS_ERR_NOT_SUPPORTED:
      return ENOTSUP;
  }
  return ENXIO;
}

/*
 * open(2)-style entry point for SLFS. Returns a small local fd (index into
 * s_sl_fds) on success, or -1 with errno set. Writes always truncate;
 * O_APPEND is not supported by the underlying FS.
 */
int fs_slfs_open(const char *pathname, int flags, mode_t mode) {
  int fd;
  /* Find a free descriptor slot (fh <= 0 means unused). */
  for (fd = 0; fd < MAX_OPEN_SLFS_FILES; fd++) {
    if (s_sl_fds[fd].fh <= 0) break;
  }
  if (fd >= MAX_OPEN_SLFS_FILES) return set_errno(ENOMEM);
  struct sl_fd_info *fi = &s_sl_fds[fd];

  /*
   * Apply path manipulations again, in case we got here directly
   * (via TI libc's "add_device").
   */
  pathname = drop_dir(pathname, NULL);

  _u32 am = 0;
  fi->size = (size_t) -1;
  int rw = (flags & 3);
  if (rw == O_RDONLY) {
    /* Cache the file size so fs_slfs_read() can simulate EOF. */
    SlFsFileInfo_t sl_fi;
    _i32 r = sl_FsGetInfo((const _u8 *) pathname, 0, &sl_fi);
    if (r == SL_FS_OK) {
      fi->size = sl_fi.FileLen;
    }
    am = FS_MODE_OPEN_READ;
  } else {
    if (!(flags & O_TRUNC) || (flags & O_APPEND)) {
      // FailFS files cannot be opened for append and will be truncated
      // when opened for write.
      return set_errno(ENOTSUP);
    }
    if (flags & O_CREAT) {
      /* Consume a pending size hint for this name, if one was recorded. */
      size_t i, size = FS_SLFS_MAX_FILE_SIZE;
      for (i = 0; i < MAX_OPEN_SLFS_FILES; i++) {
        if (s_sl_file_size_hints[i].name != NULL &&
            strcmp(s_sl_file_size_hints[i].name, pathname) == 0) {
          size = s_sl_file_size_hints[i].size;
          MG_FREE(s_sl_file_size_hints[i].name);
          s_sl_file_size_hints[i].name = NULL;
          break;
        }
      }
      DBG(("creating %s with max size %d", pathname, (int) size));
      am = FS_MODE_OPEN_CREATE(size, 0);
    } else {
      am = FS_MODE_OPEN_WRITE;
    }
  }
  _i32 r = sl_FsOpen((_u8 *) pathname, am, NULL, &fi->fh);
  DBG(("sl_FsOpen(%s, 0x%x) = %d, %d", pathname, (int) am, (int) r,
       (int) fi->fh));
  if (r == SL_FS_OK) {
    fi->pos = 0;
    r = fd;
  } else {
    fi->fh = -1;
    r = set_errno(sl_fs_to_errno(r));
  }
  return r;
}

/* close(2)-style entry point: closes the SL handle and frees the slot. */
int fs_slfs_close(int fd) {
  struct sl_fd_info *fi = &s_sl_fds[fd];
  if (fi->fh <= 0) return set_errno(EBADF);
  _i32 r = sl_FsClose(fi->fh, NULL, NULL, 0);
  DBG(("sl_FsClose(%d) = %d", (int) fi->fh, (int) r));
  s_sl_fds[fd].fh = -1;
  return set_errno(sl_fs_to_errno(r));
}

/* read(2)-style entry point; reads at the tracked position and advances it. */
ssize_t fs_slfs_read(int fd, void *buf, size_t count) {
  struct sl_fd_info *fi = &s_sl_fds[fd];
  if (fi->fh <= 0) return set_errno(EBADF);
  /* Simulate EOF. sl_FsRead @ file_size return SL_FS_ERR_OFFSET_OUT_OF_RANGE.
*/ if (fi->pos == fi->size) return 0; _i32 r = sl_FsRead(fi->fh, fi->pos, buf, count); DBG(("sl_FsRead(%d, %d, %d) = %d", (int) fi->fh, (int) fi->pos, (int) count, (int) r)); if (r >= 0) { fi->pos += r; return r; } return set_errno(sl_fs_to_errno(r)); } ssize_t fs_slfs_write(int fd, const void *buf, size_t count) { struct sl_fd_info *fi = &s_sl_fds[fd]; if (fi->fh <= 0) return set_errno(EBADF); _i32 r = sl_FsWrite(fi->fh, fi->pos, (_u8 *) buf, count); DBG(("sl_FsWrite(%d, %d, %d) = %d", (int) fi->fh, (int) fi->pos, (int) count, (int) r)); if (r >= 0) { fi->pos += r; return r; } return set_errno(sl_fs_to_errno(r)); } int fs_slfs_stat(const char *pathname, struct stat *s) { SlFsFileInfo_t sl_fi; /* * Apply path manipulations again, in case we got here directly * (via TI libc's "add_device"). */ pathname = drop_dir(pathname, NULL); _i32 r = sl_FsGetInfo((const _u8 *) pathname, 0, &sl_fi); if (r == SL_FS_OK) { s->st_mode = S_IFREG | 0666; s->st_nlink = 1; s->st_size = sl_fi.FileLen; return 0; } return set_errno(sl_fs_to_errno(r)); } int fs_slfs_fstat(int fd, struct stat *s) { struct sl_fd_info *fi = &s_sl_fds[fd]; if (fi->fh <= 0) return set_errno(EBADF); s->st_mode = 0666; s->st_mode = S_IFREG | 0666; s->st_nlink = 1; s->st_size = fi->size; return 0; } off_t fs_slfs_lseek(int fd, off_t offset, int whence) { if (s_sl_fds[fd].fh <= 0) return set_errno(EBADF); switch (whence) { case SEEK_SET: s_sl_fds[fd].pos = offset; break; case SEEK_CUR: s_sl_fds[fd].pos += offset; break; case SEEK_END: return set_errno(ENOTSUP); } return 0; } int fs_slfs_unlink(const char *pathname) { /* * Apply path manipulations again, in case we got here directly * (via TI libc's "add_device"). 
 */
  pathname = drop_dir(pathname, NULL);
  return set_errno(sl_fs_to_errno(sl_FsDel((const _u8 *) pathname, 0)));
}

/* Rename is not supported by the underlying SimpleLink FS. */
int fs_slfs_rename(const char *from, const char *to) {
  return set_errno(ENOTSUP);
}

/* Records a max-size hint to be used when `name` is next created.
 * NOTE(review): the strdup() result is not checked for NULL -- on OOM the
 * hint silently records no name. */
void fs_slfs_set_new_file_size(const char *name, size_t size) {
  int i;
  for (i = 0; i < MAX_OPEN_SLFS_FILES; i++) {
    if (s_sl_file_size_hints[i].name == NULL) {
      DBG(("File size hint: %s %d", name, (int) size));
      s_sl_file_size_hints[i].name = strdup(name);
      s_sl_file_size_hints[i].size = size;
      break;
    }
  }
}

#endif /* defined(MG_FS_SLFS) || defined(CC3200_FS_SLFS) */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_fs.c"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#if MG_NET_IF == MG_NET_IF_SIMPLELINK && \
    (defined(MG_FS_SLFS) || defined(MG_FS_SPIFFS))

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef __TI_COMPILER_VERSION__
#include <file.h>
#endif

/* Amalgamated: #include "common/cs_dbg.h" */
/* Amalgamated: #include "common/platform.h" */

#ifdef CC3200_FS_SPIFFS
/* Amalgamated: #include "cc3200_fs_spiffs.h" */
#endif

#ifdef MG_FS_SLFS
/* Amalgamated: #include "sl_fs_slfs.h" */
#endif

/*
 * fd dispatch layer: fds 0..2 are the system stdio fds, SPIFFS descriptors
 * start at SPIFFS_FD_BASE and SLFS descriptors at SLFS_FD_BASE.
 */
#define NUM_SYS_FDS 3
#define SPIFFS_FD_BASE 10
#define SLFS_FD_BASE 100

#ifndef MG_UART_CHAR_PUT
#if CS_PLATFORM == CS_P_CC3200
#include <inc/hw_types.h>
#include <inc/hw_memmap.h>
#include <driverlib/rom.h>
#include <driverlib/rom_map.h>
#include <driverlib/uart.h>
#define MG_UART_CHAR_PUT(fd, c) MAP_UARTCharPut(UARTA0_BASE, c);
#else
#define MG_UART_CHAR_PUT(fd, c)
#endif /* CS_PLATFORM == CS_P_CC3200 */
#endif /* !MG_UART_CHAR_PUT */

/* Sets errno and returns the conventional libc result (-1 on error). */
int set_errno(int e) {
  errno = e;
  return (e == 0 ? 0 : -1);
}

/* Strips the "SL:" device prefix (reporting it via *is_slfs), a leading
 * "./", and a lone leading '/', returning a pointer into `fname`. */
static const char *drop_dir(const char *fname, bool *is_slfs) {
  if (is_slfs != NULL) {
    *is_slfs = (strncmp(fname, "SL:", 3) == 0);
    if (*is_slfs) fname += 3;
  }
  /* Drop "./", if any */
  if (fname[0] == '.' && fname[1] == '/') {
    fname += 2;
  }
  /*
   * Drop / if it is the only one in the path.
   * This allows use of /pretend/directories but serves /file.txt as normal.
   */
  if (fname[0] == '/' && strchr(fname + 1, '/') == NULL) {
    fname++;
  }
  return fname;
}

enum fd_type {
  FD_INVALID,
  FD_SYS,
#ifdef CC3200_FS_SPIFFS
  FD_SPIFFS,
#endif
#ifdef MG_FS_SLFS
  FD_SLFS
#endif
};

/* Classifies an fd into its backing filesystem by numeric range. */
static int fd_type(int fd) {
  if (fd >= 0 && fd < NUM_SYS_FDS) return FD_SYS;
#ifdef CC3200_FS_SPIFFS
  if (fd >= SPIFFS_FD_BASE && fd < SPIFFS_FD_BASE + MAX_OPEN_SPIFFS_FILES) {
    return FD_SPIFFS;
  }
#endif
#ifdef MG_FS_SLFS
  if (fd >= SLFS_FD_BASE && fd < SLFS_FD_BASE + MAX_OPEN_SLFS_FILES) {
    return FD_SLFS;
  }
#endif
  return FD_INVALID;
}

/* libc open() shim: routes "SL:"-prefixed paths to SLFS, the rest to
 * SPIFFS, and biases the returned fd into the backend's range. */
#if MG_TI_NO_HOST_INTERFACE
int open(const char *pathname, unsigned flags, int mode) {
#else
int _open(const char *pathname, int flags, mode_t mode) {
#endif
  int fd = -1;
  bool is_sl;
  const char *fname = drop_dir(pathname, &is_sl);
  if (is_sl) {
#ifdef MG_FS_SLFS
    fd = fs_slfs_open(fname, flags, mode);
    if (fd >= 0) fd += SLFS_FD_BASE;
#endif
  } else {
#ifdef CC3200_FS_SPIFFS
    fd = fs_spiffs_open(fname, flags, mode);
    if (fd >= 0) fd += SPIFFS_FD_BASE;
#endif
  }
  LOG(LL_DEBUG,
      ("open(%s, 0x%x) = %d, fname = %s", pathname, flags, fd, fname));
  return fd;
}

/* libc stat() shim; "" and "." stat as a fake root directory. */
int _stat(const char *pathname, struct stat *st) {
  int res = -1;
  bool is_sl;
  const char *fname = drop_dir(pathname, &is_sl);
  memset(st, 0, sizeof(*st));
  /* Simulate statting the root directory.
 */
  if (fname[0] == '\0' || strcmp(fname, ".") == 0) {
    st->st_ino = 0;
    st->st_mode = S_IFDIR | 0777;
    st->st_nlink = 1;
    st->st_size = 0;
    return 0;
  }
  if (is_sl) {
#ifdef MG_FS_SLFS
    res = fs_slfs_stat(fname, st);
#endif
  } else {
#ifdef CC3200_FS_SPIFFS
    res = fs_spiffs_stat(fname, st);
#endif
  }
  LOG(LL_DEBUG, ("stat(%s) = %d; fname = %s", pathname, res, fname));
  return res;
}

/* libc close() shim, dispatched by fd range. */
#if MG_TI_NO_HOST_INTERFACE
int close(int fd) {
#else
int _close(int fd) {
#endif
  int r = -1;
  switch (fd_type(fd)) {
    case FD_INVALID:
      r = set_errno(EBADF);
      break;
    case FD_SYS:
      /* Closing the stdio fds is not allowed. */
      r = set_errno(EACCES);
      break;
#ifdef CC3200_FS_SPIFFS
    case FD_SPIFFS:
      r = fs_spiffs_close(fd - SPIFFS_FD_BASE);
      break;
#endif
#ifdef MG_FS_SLFS
    case FD_SLFS:
      r = fs_slfs_close(fd - SLFS_FD_BASE);
      break;
#endif
  }
  DBG(("close(%d) = %d", fd, r));
  return r;
}

/* libc lseek() shim, dispatched by fd range. */
#if MG_TI_NO_HOST_INTERFACE
off_t lseek(int fd, off_t offset, int whence) {
#else
off_t _lseek(int fd, off_t offset, int whence) {
#endif
  int r = -1;
  switch (fd_type(fd)) {
    case FD_INVALID:
      r = set_errno(EBADF);
      break;
    case FD_SYS:
      r = set_errno(ESPIPE);
      break;
#ifdef CC3200_FS_SPIFFS
    case FD_SPIFFS:
      r = fs_spiffs_lseek(fd - SPIFFS_FD_BASE, offset, whence);
      break;
#endif
#ifdef MG_FS_SLFS
    case FD_SLFS:
      r = fs_slfs_lseek(fd - SLFS_FD_BASE, offset, whence);
      break;
#endif
  }
  DBG(("lseek(%d, %d, %d) = %d", fd, (int) offset, whence, r));
  return r;
}

/* libc fstat() shim; the stdio fds report as character devices. */
int _fstat(int fd, struct stat *s) {
  int r = -1;
  memset(s, 0, sizeof(*s));
  switch (fd_type(fd)) {
    case FD_INVALID:
      r = set_errno(EBADF);
      break;
    case FD_SYS: {
      /* Create barely passable stats for STD{IN,OUT,ERR}.
 */
      memset(s, 0, sizeof(*s));
      s->st_ino = fd;
      s->st_mode = S_IFCHR | 0666;
      r = 0;
      break;
    }
#ifdef CC3200_FS_SPIFFS
    case FD_SPIFFS:
      r = fs_spiffs_fstat(fd - SPIFFS_FD_BASE, s);
      break;
#endif
#ifdef MG_FS_SLFS
    case FD_SLFS:
      r = fs_slfs_fstat(fd - SLFS_FD_BASE, s);
      break;
#endif
  }
  DBG(("fstat(%d) = %d", fd, r));
  return r;
}

/* libc read() shim; reading from stdin is not supported. */
#if MG_TI_NO_HOST_INTERFACE
int read(int fd, char *buf, unsigned count) {
#else
ssize_t _read(int fd, void *buf, size_t count) {
#endif
  int r = -1;
  switch (fd_type(fd)) {
    case FD_INVALID:
      r = set_errno(EBADF);
      break;
    case FD_SYS: {
      if (fd != 0) {
        r = set_errno(EACCES);
        break;
      }
      /* Should we allow reading from stdin = uart? */
      r = set_errno(ENOTSUP);
      break;
    }
#ifdef CC3200_FS_SPIFFS
    case FD_SPIFFS:
      r = fs_spiffs_read(fd - SPIFFS_FD_BASE, buf, count);
      break;
#endif
#ifdef MG_FS_SLFS
    case FD_SLFS:
      r = fs_slfs_read(fd - SLFS_FD_BASE, buf, count);
      break;
#endif
  }
  DBG(("read(%d, %u) = %d", fd, count, r));
  return r;
}

/* libc write() shim; stdout/stderr go to the UART with '\n' -> '\r\n'. */
#if MG_TI_NO_HOST_INTERFACE
int write(int fd, const char *buf, unsigned count) {
#else
ssize_t _write(int fd, const void *buf, size_t count) {
#endif
  int r = -1;
  size_t i = 0;
  switch (fd_type(fd)) {
    case FD_INVALID:
      r = set_errno(EBADF);
      break;
    case FD_SYS: {
      if (fd == 0) {
        r = set_errno(EACCES);
        break;
      }
      for (i = 0; i < count; i++) {
        const char c = ((const char *) buf)[i];
        if (c == '\n') MG_UART_CHAR_PUT(fd, '\r');
        MG_UART_CHAR_PUT(fd, c);
      }
      r = count;
      break;
    }
#ifdef CC3200_FS_SPIFFS
    case FD_SPIFFS:
      r = fs_spiffs_write(fd - SPIFFS_FD_BASE, buf, count);
      break;
#endif
#ifdef MG_FS_SLFS
    case FD_SLFS:
      r = fs_slfs_write(fd - SLFS_FD_BASE, buf, count);
      break;
#endif
  }
  return r;
}

/*
 * On Newlib we override rename directly too, because the default
 * implementation using _link and _unlink doesn't work for us.
 */
#if MG_TI_NO_HOST_INTERFACE || defined(_NEWLIB_VERSION)
/* rename() shim; SLFS does not support renaming (see fs_slfs_rename). */
int rename(const char *frompath, const char *topath) {
  int r = -1;
  bool is_sl_from, is_sl_to;
  const char *from = drop_dir(frompath, &is_sl_from);
  const char *to = drop_dir(topath, &is_sl_to);
  if (is_sl_from || is_sl_to) {
    set_errno(ENOTSUP);
  } else {
#ifdef CC3200_FS_SPIFFS
    r = fs_spiffs_rename(from, to);
#endif
  }
  DBG(("rename(%s, %s) = %d", from, to, r));
  return r;
}
#endif /* MG_TI_NO_HOST_INTERFACE || defined(_NEWLIB_VERSION) */

/* libc unlink() shim, dispatched by path prefix. */
#if MG_TI_NO_HOST_INTERFACE
int unlink(const char *pathname) {
#else
int _unlink(const char *pathname) {
#endif
  int r = -1;
  bool is_sl;
  const char *fname = drop_dir(pathname, &is_sl);
  if (is_sl) {
#ifdef MG_FS_SLFS
    r = fs_slfs_unlink(fname);
#endif
  } else {
#ifdef CC3200_FS_SPIFFS
    r = fs_spiffs_unlink(fname);
#endif
  }
  DBG(("unlink(%s) = %d, fname = %s", pathname, r, fname));
  return r;
}

#ifdef CC3200_FS_SPIFFS /* FailFS does not support listing files. */
/* Directory listing is served by SPIFFS only. */
DIR *opendir(const char *dir_name) {
  DIR *r = NULL;
  bool is_sl;
  drop_dir(dir_name, &is_sl);
  if (is_sl) {
    r = NULL;
    set_errno(ENOTSUP);
  } else {
    r = fs_spiffs_opendir(dir_name);
  }
  DBG(("opendir(%s) = %p", dir_name, r));
  return r;
}

struct dirent *readdir(DIR *dir) {
  struct dirent *res = fs_spiffs_readdir(dir);
  DBG(("readdir(%p) = %p", dir, res));
  return res;
}

int closedir(DIR *dir) {
  int res = fs_spiffs_closedir(dir);
  DBG(("closedir(%p) = %d", dir, res));
  return res;
}

int rmdir(const char *path) {
  return fs_spiffs_rmdir(path);
}

/* NOTE(review): returns a positive ENOTDIR instead of -1 + errno for
 * non-root paths -- callers should treat nonzero as failure. */
int mkdir(const char *path, mode_t mode) {
  (void) path;
  (void) mode;
  /* for spiffs supports only root dir, which comes from mongoose as '.' */
  return (strlen(path) == 1 && *path == '.') ? 0 : ENOTDIR;
}
#endif

/* Registers the "SL" device with the TI libc (no-op elsewhere).
 * Returns 1 on success, 0 on failure. */
int sl_fs_init(void) {
  int ret = 1;
#ifdef __TI_COMPILER_VERSION__
#ifdef MG_FS_SLFS
#pragma diag_push
#pragma diag_suppress 169 /* Nothing we can do about the prototype mismatch. \
                             */
  ret = (add_device("SL", _MSA, fs_slfs_open, fs_slfs_close, fs_slfs_read,
                    fs_slfs_write, fs_slfs_lseek, fs_slfs_unlink,
                    fs_slfs_rename) == 0);
#pragma diag_pop
#endif
#endif
  return ret;
}

#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK && (defined(MG_FS_SLFS) || \
          defined(MG_FS_SPIFFS)) */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_socket.c"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#if MG_NET_IF == MG_NET_IF_SIMPLELINK

#include <errno.h>
#include <stdio.h>

/* Amalgamated: #include "common/platform.h" */

/* inet_ntop() replacement for SimpleLink (IPv4 only). */
const char *inet_ntop(int af, const void *src, char *dst, socklen_t size) {
  int res;
  struct in_addr *in = (struct in_addr *) src;
  if (af != AF_INET) {
    errno = EAFNOSUPPORT;
    return NULL;
  }
  res = snprintf(dst, size, "%lu.%lu.%lu.%lu", SL_IPV4_BYTE(in->s_addr, 0),
                 SL_IPV4_BYTE(in->s_addr, 1), SL_IPV4_BYTE(in->s_addr, 2),
                 SL_IPV4_BYTE(in->s_addr, 3));
  return res > 0 ? dst : NULL;
}

/* inet_ntoa() replacement; NOTE: uses a static buffer, not reentrant. */
char *inet_ntoa(struct in_addr n) {
  static char a[16];
  return (char *) inet_ntop(AF_INET, &n, a, sizeof(a));
}

/* inet_pton() replacement (IPv4 only); stores the octets reversed to match
 * SimpleLink's s_addr byte order (see SL_IPV4_BYTE use above).
 * NOTE(review): sscanf does not range-check octets > 255. */
int inet_pton(int af, const char *src, void *dst) {
  uint32_t a0, a1, a2, a3;
  uint8_t *db = (uint8_t *) dst;
  if (af != AF_INET) {
    errno = EAFNOSUPPORT;
    return 0;
  }
  if (sscanf(src, "%lu.%lu.%lu.%lu", &a0, &a1, &a2, &a3) != 4) {
    return 0;
  }
  *db = a3;
  *(db + 1) = a2;
  *(db + 2) = a1;
  *(db + 3) = a0;
  return 1;
}

#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_mg_task.c"
#endif
#if MG_NET_IF == MG_NET_IF_SIMPLELINK && !defined(MG_SIMPLELINK_NO_OSI)

/* Amalgamated: #include "mg_task.h" */

#include <oslib/osi.h>

enum mg_q_msg_type {
  MG_Q_MSG_CB,
};
struct mg_q_msg {
  enum mg_q_msg_type type;
  void (*cb)(struct mg_mgr *mgr, void *arg);
  void *arg;
};
/* Message queue feeding callbacks into the Mongoose task. */
static OsiMsgQ_t s_mg_q;
static void mg_task(void *arg);

/* Creates the message queue and spawns the Mongoose OSI task.
 * Returns true on success. */
bool mg_start_task(int priority, int stack_size, mg_init_cb mg_init) {
  if (osi_MsgQCreate(&s_mg_q, "MG", sizeof(struct mg_q_msg), 16) != OSI_OK) {
    return false;
  }
  if
(osi_TaskCreate(mg_task, (const signed char *) "MG", stack_size,
                     (void *) mg_init, priority, NULL) != OSI_OK) {
    return false;
  }
  return true;
}

/* Mongoose task body: runs mg_init once, then polls the manager and drains
 * queued callbacks forever. */
static void mg_task(void *arg) {
  struct mg_mgr mgr;
  mg_init_cb mg_init = (mg_init_cb) arg;
  mg_mgr_init(&mgr, NULL);
  mg_init(&mgr);
  while (1) {
    struct mg_q_msg msg;
    mg_mgr_poll(&mgr, 1);
    /* 1 ms wait; continue polling if no message arrived. */
    if (osi_MsgQRead(&s_mg_q, &msg, 1) != OSI_OK) continue;
    switch (msg.type) {
      case MG_Q_MSG_CB: {
        msg.cb(&mgr, msg.arg);
      }
    }
  }
}

/* Schedules `cb` to run on the Mongoose task (non-blocking enqueue). */
void mg_run_in_task(void (*cb)(struct mg_mgr *mgr, void *arg), void *cb_arg) {
  struct mg_q_msg msg = {MG_Q_MSG_CB, cb, cb_arg};
  osi_MsgQWrite(&s_mg_q, &msg, OSI_NO_WAIT);
}

#endif /* MG_NET_IF == MG_NET_IF_SIMPLELINK && !defined(MG_SIMPLELINK_NO_OSI) \
          */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_net_if.h"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

#ifndef CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_
#define CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_

/* Amalgamated: #include "mongoose/src/net_if.h" */

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

#ifndef MG_ENABLE_NET_IF_SIMPLELINK
#define MG_ENABLE_NET_IF_SIMPLELINK MG_NET_IF == MG_NET_IF_SIMPLELINK
#endif

extern const struct mg_iface_vtable mg_simplelink_iface_vtable;

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* CS_COMMON_PLATFORMS_SIMPLELINK_SL_NET_IF_H_ */
#ifdef MG_MODULE_LINES
#line 1 "common/platforms/simplelink/sl_net_if.c"
#endif
/*
 * Copyright (c) 2014-2016 Cesanta Software Limited
 * All rights reserved
 */

/* Amalgamated: #include "common/platforms/simplelink/sl_net_if.h" */

#if MG_ENABLE_NET_IF_SIMPLELINK

/* Amalgamated: #include "mongoose/src/internal.h" */
/* Amalgamated: #include "mongoose/src/util.h" */

#define MG_TCP_RECV_BUFFER_SIZE 1024
#define MG_UDP_RECV_BUFFER_SIZE 1500

static sock_t mg_open_listening_socket(union socket_address *sa, int type,
                                       int proto);
int sl_set_ssl_opts(struct mg_connection *nc);

/* Switches a SimpleLink socket into non-blocking mode. */
void mg_set_non_blocking_mode(sock_t sock) {
  SlSockNonblocking_t opt;
  opt.NonblockingEnabled = 1;
  sl_SetSockOpt(sock, SL_SOL_SOCKET, SL_SO_NONBLOCKING, &opt, sizeof(opt));
}

/* True for hard errors; SL_EALREADY/SL_EAGAIN mean "try again later". */
static int mg_is_error(int n) {
  return (n < 0 && n != SL_EALREADY && n != SL_EAGAIN);
}

/* Starts a (possibly secure) non-blocking TCP connect; result in nc->err. */
void mg_sl_if_connect_tcp(struct mg_connection *nc,
                          const union socket_address *sa) {
  int proto = 0;
  if (nc->flags & MG_F_SSL) proto = SL_SEC_SOCKET;
  sock_t sock = sl_Socket(AF_INET, SOCK_STREAM, proto);
  if (sock < 0) {
    nc->err = sock;
    goto out;
  }
  mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
  nc->err = sl_set_ssl_opts(nc);
  if (nc->err != 0) goto out;
#endif
  nc->err = sl_Connect(sock, &sa->sa, sizeof(sa->sin));
out:
  DBG(("%p to %s:%d sock %d %d err %d", nc, inet_ntoa(sa->sin.sin_addr),
       ntohs(sa->sin.sin_port), nc->sock, proto, nc->err));
}

/* Creates the UDP socket for an outgoing connection; result in nc->err. */
void mg_sl_if_connect_udp(struct mg_connection *nc) {
  sock_t sock = sl_Socket(AF_INET, SOCK_DGRAM, 0);
  if (sock < 0) {
    nc->err = sock;
    return;
  }
  mg_sock_set(nc, sock);
  nc->err = 0;
}

/* Opens a (possibly secure) TCP listener bound to *sa; 0 on success. */
int mg_sl_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) {
  int proto = 0;
  if (nc->flags & MG_F_SSL) proto = SL_SEC_SOCKET;
  sock_t sock = mg_open_listening_socket(sa, SOCK_STREAM, proto);
  if (sock < 0) return sock;
  mg_sock_set(nc, sock);
#if MG_ENABLE_SSL
  return sl_set_ssl_opts(nc);
#else
  return 0;
#endif
}

/* Opens a UDP listener bound to *sa; 0 on success, errno-ish on failure. */
int mg_sl_if_listen_udp(struct mg_connection *nc, union socket_address *sa) {
  sock_t sock = mg_open_listening_socket(sa, SOCK_DGRAM, 0);
  if (sock == INVALID_SOCKET) return (errno ? errno : 1);
  mg_sock_set(nc, sock);
  return 0;
}

/* Outgoing data is buffered; the poll loop flushes it via
 * mg_write_to_socket(). */
void mg_sl_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) {
  mbuf_append(&nc->send_mbuf, buf, len);
}

void mg_sl_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) {
  mbuf_append(&nc->send_mbuf, buf, len);
}

/* No kernel-side receive window to manage on SimpleLink: no-op. */
void mg_sl_if_recved(struct mg_connection *nc, size_t len) {
  (void) nc;
  (void) len;
}

int mg_sl_if_create_conn(struct mg_connection *nc) {
  (void) nc;
  return 1;
}

void mg_sl_if_destroy_conn(struct mg_connection *nc) {
  if (nc->sock == INVALID_SOCKET) return;
  /* For UDP, only close outgoing sockets or listeners. */
  if (!(nc->flags & MG_F_UDP) || nc->listener == NULL) {
    sl_Close(nc->sock);
  }
  nc->sock = INVALID_SOCKET;
}

/* Accepts one pending connection on listener `lc`; returns 1 if accepted. */
static int mg_accept_conn(struct mg_connection *lc) {
  struct mg_connection *nc;
  union socket_address sa;
  socklen_t sa_len = sizeof(sa);
  sock_t sock = sl_Accept(lc->sock, &sa.sa, &sa_len);
  if (sock < 0) {
    DBG(("%p: failed to accept: %d", lc, sock));
    return 0;
  }
  nc = mg_if_accept_new_conn(lc);
  if (nc == NULL) {
    sl_Close(sock);
    return 0;
  }
  DBG(("%p conn from %s:%d", nc, inet_ntoa(sa.sin.sin_addr),
       ntohs(sa.sin.sin_port)));
  mg_sock_set(nc, sock);
  /* On SimpleLink the TLS handshake happens inside sl_Accept. */
  if (nc->flags & MG_F_SSL) nc->flags |= MG_F_SSL_HANDSHAKE_DONE;
  mg_if_accept_tcp_cb(nc, &sa, sa_len);
  return 1;
}

/* 'sa' must be an initialized address to bind to */
static sock_t mg_open_listening_socket(union socket_address *sa, int type,
                                       int proto) {
  int r;
  socklen_t sa_len =
      (sa->sa.sa_family == AF_INET) ? sizeof(sa->sin) : sizeof(sa->sin6);
  sock_t sock = sl_Socket(sa->sa.sa_family, type, proto);
  if (sock < 0) return sock;
  if ((r = sl_Bind(sock, &sa->sa, sa_len)) < 0) {
    sl_Close(sock);
    return r;
  }
  if (type != SOCK_DGRAM && (r = sl_Listen(sock, SOMAXCONN)) < 0) {
    sl_Close(sock);
    return r;
  }
  mg_set_non_blocking_mode(sock);
  return sock;
}

/* Flushes as much of nc->send_mbuf as the socket accepts. */
static void mg_write_to_socket(struct mg_connection *nc) {
  struct mbuf *io = &nc->send_mbuf;
  int n = 0;

  if (nc->flags & MG_F_UDP) {
    n = sl_SendTo(nc->sock, io->buf, io->len, 0, &nc->sa.sa,
                  sizeof(nc->sa.sin));
    DBG(("%p %d %d %d %s:%hu", nc, nc->sock, n, errno,
         inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port)));
  } else {
    n = (int) sl_Send(nc->sock, io->buf, io->len, 0);
    DBG(("%p %d bytes -> %d", nc, n, nc->sock));
  }

  if (n > 0) {
    mbuf_remove(io, n);
    mg_if_sent_cb(nc, n);
  } else if (n < 0 && mg_is_error(n)) {
    /* Something went wrong, drop the connection. */
    nc->flags |= MG_F_CLOSE_IMMEDIATELY;
  }
}

/* Space left under the connection's receive-buffer limit, capped at `max`. */
MG_INTERNAL size_t recv_avail_size(struct mg_connection *conn, size_t max) {
  size_t avail;
  if (conn->recv_mbuf_limit < conn->recv_mbuf.len) return 0;
  avail = conn->recv_mbuf_limit - conn->recv_mbuf.len;
  return avail > max ? max : avail;
}

/* Reads from a TCP socket into a heap buffer; on success ownership of the
 * buffer passes to mg_if_recv_tcp_cb (the `own` flag). */
static void mg_handle_tcp_read(struct mg_connection *conn) {
  int n = 0;
  char *buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);

  if (buf == NULL) {
    DBG(("OOM"));
    return;
  }

  n = (int) sl_Recv(conn->sock, buf,
                    recv_avail_size(conn, MG_TCP_RECV_BUFFER_SIZE), 0);
  DBG(("%p %d bytes <- %d", conn, n, conn->sock));
  if (n > 0) {
    mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
  } else {
    MG_FREE(buf);
  }
  if (n == 0) {
    /* Orderly shutdown of the socket, try flushing output.
 */
    conn->flags |= MG_F_SEND_AND_CLOSE;
  } else if (mg_is_error(n)) {
    conn->flags |= MG_F_CLOSE_IMMEDIATELY;
  }
}

/* Reads one UDP datagram; on success ownership of the buffer passes to
 * mg_if_recv_udp_cb. */
static void mg_handle_udp_read(struct mg_connection *nc) {
  char *buf = (char *) MG_MALLOC(MG_UDP_RECV_BUFFER_SIZE);
  if (buf == NULL) return;
  union socket_address sa;
  socklen_t sa_len = sizeof(sa);
  int n = sl_RecvFrom(nc->sock, buf, MG_UDP_RECV_BUFFER_SIZE, 0,
                      (SlSockAddr_t *) &sa, &sa_len);
  DBG(("%p %d bytes from %s:%d", nc, n, inet_ntoa(nc->sa.sin.sin_addr),
       ntohs(nc->sa.sin.sin_port)));
  if (n > 0) {
    mg_if_recv_udp_cb(nc, buf, n, &sa, sa_len);
  } else {
    MG_FREE(buf);
  }
}

/* Per-socket readiness bits used by the poll loop. */
#define _MG_F_FD_CAN_READ 1
#define _MG_F_FD_CAN_WRITE 1 << 1
#define _MG_F_FD_ERROR 1 << 2

void mg_mgr_handle_conn(struct mg_connection *nc, int fd_flags, double now) {
  DBG(("%p fd=%d fd_flags=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock,
       fd_flags, nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len));

  if (nc->flags & MG_F_CONNECTING) {
    if (nc->flags & MG_F_UDP || nc->err != SL_EALREADY) {
      mg_if_connect_cb(nc, nc->err);
    } else {
      /* In SimpleLink, to get status of non-blocking connect() we need to wait
       * until socket is writable and repeat the call to sl_Connect again,
       * which will now return the real status. */
      if (fd_flags & _MG_F_FD_CAN_WRITE) {
        nc->err = sl_Connect(nc->sock, &nc->sa.sa, sizeof(nc->sa.sin));
        DBG(("%p conn res=%d", nc, nc->err));
        if (nc->err == SL_ESECSNOVERIFY ||
            /* TODO(rojer): Provide API to set the date for verification. */
            nc->err == SL_ESECDATEERROR) {
          nc->err = 0;
        }
        if (nc->flags & MG_F_SSL && nc->err == 0) {
          nc->flags |= MG_F_SSL_HANDSHAKE_DONE;
        }
        mg_if_connect_cb(nc, nc->err);
      }
    }
    /* Ignore read/write in further processing, we've handled it.
*/ fd_flags &= ~(_MG_F_FD_CAN_READ | _MG_F_FD_CAN_WRITE); } if (fd_flags & _MG_F_FD_CAN_READ) { if (nc->flags & MG_F_UDP) { mg_handle_udp_read(nc); } else { if (nc->flags & MG_F_LISTENING) { mg_accept_conn(nc); } else { mg_handle_tcp_read(nc); } } } if (!(nc->flags & MG_F_CLOSE_IMMEDIATELY)) { if ((fd_flags & _MG_F_FD_CAN_WRITE) && nc->send_mbuf.len > 0) { mg_write_to_socket(nc); } if (!(fd_flags & (_MG_F_FD_CAN_READ | _MG_F_FD_CAN_WRITE))) { mg_if_poll(nc, now); } mg_if_timer(nc, now); } DBG(("%p after fd=%d nc_flags=%lu rmbl=%d smbl=%d", nc, nc->sock, nc->flags, (int) nc->recv_mbuf.len, (int) nc->send_mbuf.len)); } /* Associate a socket to a connection. */ void mg_sl_if_sock_set(struct mg_connection *nc, sock_t sock) { mg_set_non_blocking_mode(sock); nc->sock = sock; DBG(("%p %d", nc, sock)); } void mg_sl_if_init(struct mg_iface *iface) { (void) iface; DBG(("%p using sl_Select()", iface->mgr)); } void mg_sl_if_free(struct mg_iface *iface) { (void) iface; } void mg_sl_if_add_conn(struct mg_connection *nc) { (void) nc; } void mg_sl_if_remove_conn(struct mg_connection *nc) { (void) nc; } time_t mg_sl_if_poll(struct mg_iface *iface, int timeout_ms) { struct mg_mgr *mgr = iface->mgr; double now = mg_time(); double min_timer; struct mg_connection *nc, *tmp; struct SlTimeval_t tv; SlFdSet_t read_set, write_set, err_set; sock_t max_fd = INVALID_SOCKET; int num_fds, num_ev = 0, num_timers = 0; SL_FD_ZERO(&read_set); SL_FD_ZERO(&write_set); SL_FD_ZERO(&err_set); /* * Note: it is ok to have connections with sock == INVALID_SOCKET in the list, * e.g. timer-only "connections". 
*/ min_timer = 0; for (nc = mgr->active_connections, num_fds = 0; nc != NULL; nc = tmp) { tmp = nc->next; if (nc->sock != INVALID_SOCKET) { num_fds++; if (!(nc->flags & MG_F_WANT_WRITE) && nc->recv_mbuf.len < nc->recv_mbuf_limit && (!(nc->flags & MG_F_UDP) || nc->listener == NULL)) { SL_FD_SET(nc->sock, &read_set); if (max_fd == INVALID_SOCKET || nc->sock > max_fd) max_fd = nc->sock; } if (((nc->flags & MG_F_CONNECTING) && !(nc->flags & MG_F_WANT_READ)) || (nc->send_mbuf.len > 0 && !(nc->flags & MG_F_CONNECTING))) { SL_FD_SET(nc->sock, &write_set); SL_FD_SET(nc->sock, &err_set); if (max_fd == INVALID_SOCKET || nc->sock > max_fd) max_fd = nc->sock; } } if (nc->ev_timer_time > 0) { if (num_timers == 0 || nc->ev_timer_time < min_timer) { min_timer = nc->ev_timer_time; } num_timers++; } } /* * If there is a timer to be fired earlier than the requested timeout, * adjust the timeout. */ if (num_timers > 0) { double timer_timeout_ms = (min_timer - mg_time()) * 1000 + 1 /* rounding */; if (timer_timeout_ms < timeout_ms) { timeout_ms = timer_timeout_ms; } } if (timeout_ms < 0) timeout_ms = 0; tv.tv_sec = timeout_ms / 1000; tv.tv_usec = (timeout_ms % 1000) * 1000; if (num_fds > 0) { num_ev = sl_Select((int) max_fd + 1, &read_set, &write_set, &err_set, &tv); } now = mg_time(); DBG(("sl_Select @ %ld num_ev=%d of %d, timeout=%d", (long) now, num_ev, num_fds, timeout_ms)); for (nc = mgr->active_connections; nc != NULL; nc = tmp) { int fd_flags = 0; if (nc->sock != INVALID_SOCKET) { if (num_ev > 0) { fd_flags = (SL_FD_ISSET(nc->sock, &read_set) && (!(nc->flags & MG_F_UDP) || nc->listener == NULL) ? _MG_F_FD_CAN_READ : 0) | (SL_FD_ISSET(nc->sock, &write_set) ? _MG_F_FD_CAN_WRITE : 0) | (SL_FD_ISSET(nc->sock, &err_set) ? _MG_F_FD_ERROR : 0); } /* SimpleLink does not report UDP sockets as writable. 
*/ if (nc->flags & MG_F_UDP && nc->send_mbuf.len > 0) { fd_flags |= _MG_F_FD_CAN_WRITE; } } tmp = nc->next; mg_mgr_handle_conn(nc, fd_flags, now); } for (nc = mgr->active_connections; nc != NULL; nc = tmp) { tmp = nc->next; if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) || (nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE))) { mg_close_conn(nc); } } return now; } void mg_sl_if_get_conn_addr(struct mg_connection *nc, int remote, union socket_address *sa) { /* SimpleLink does not provide a way to get socket's peer address after * accept or connect. Address should have been preserved in the connection, * so we do our best here by using it. */ if (remote) memcpy(sa, &nc->sa, sizeof(*sa)); } void sl_restart_cb(struct mg_mgr *mgr) { /* * SimpleLink has been restarted, meaning all sockets have been invalidated. * We try our best - we'll restart the listeners, but for outgoing * connections we have no option but to terminate. */ struct mg_connection *nc; for (nc = mg_next(mgr, NULL); nc != NULL; nc = mg_next(mgr, nc)) { if (nc->sock == INVALID_SOCKET) continue; /* Could be a timer */ if (nc->flags & MG_F_LISTENING) { DBG(("restarting %p %s:%d", nc, inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port))); int res = (nc->flags & MG_F_UDP ? mg_sl_if_listen_udp(nc, &nc->sa) : mg_sl_if_listen_tcp(nc, &nc->sa)); if (res == 0) continue; /* Well, we tried and failed. Fall through to closing. */ } nc->sock = INVALID_SOCKET; DBG(("terminating %p %s:%d", nc, inet_ntoa(nc->sa.sin.sin_addr), ntohs(nc->sa.sin.sin_port))); /* TODO(rojer): Outgoing UDP? 
*/ nc->flags |= MG_F_CLOSE_IMMEDIATELY; } } /* clang-format off */ #define MG_SL_IFACE_VTABLE \ { \ mg_sl_if_init, \ mg_sl_if_free, \ mg_sl_if_add_conn, \ mg_sl_if_remove_conn, \ mg_sl_if_poll, \ mg_sl_if_listen_tcp, \ mg_sl_if_listen_udp, \ mg_sl_if_connect_tcp, \ mg_sl_if_connect_udp, \ mg_sl_if_tcp_send, \ mg_sl_if_udp_send, \ mg_sl_if_recved, \ mg_sl_if_create_conn, \ mg_sl_if_destroy_conn, \ mg_sl_if_sock_set, \ mg_sl_if_get_conn_addr, \ } /* clang-format on */ const struct mg_iface_vtable mg_simplelink_iface_vtable = MG_SL_IFACE_VTABLE; #if MG_NET_IF == MG_NET_IF_SIMPLELINK const struct mg_iface_vtable mg_default_iface_vtable = MG_SL_IFACE_VTABLE; #endif #endif /* MG_ENABLE_NET_IF_SIMPLELINK */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/simplelink/sl_ssl_if.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_SIMPLELINK /* Amalgamated: #include "common/mg_mem.h" */ struct mg_ssl_if_ctx { char *ssl_cert; char *ssl_key; char *ssl_ca_cert; char *ssl_server_name; }; void mg_ssl_if_init() { } enum mg_ssl_if_result mg_ssl_if_conn_init( struct mg_connection *nc, const struct mg_ssl_if_conn_params *params, const char **err_msg) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) MG_CALLOC(1, sizeof(*ctx)); if (ctx == NULL) { MG_SET_PTRPTR(err_msg, "Out of memory"); return MG_SSL_ERROR; } nc->ssl_if_data = ctx; if (params->cert != NULL || params->key != NULL) { if (params->cert != NULL && params->key != NULL) { ctx->ssl_cert = strdup(params->cert); ctx->ssl_key = strdup(params->key); } else { MG_SET_PTRPTR(err_msg, "Both cert and key are required."); return MG_SSL_ERROR; } } if (params->ca_cert != NULL && strcmp(params->ca_cert, "*") != 0) { ctx->ssl_ca_cert = strdup(params->ca_cert); } /* TODO(rojer): cipher_suites. 
*/ if (params->server_name != NULL) { ctx->ssl_server_name = strdup(params->server_name); } return MG_SSL_OK; } void mg_ssl_if_conn_close_notify(struct mg_connection *nc) { /* Nothing to do */ (void) nc; } void mg_ssl_if_conn_free(struct mg_connection *nc) { struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; if (ctx == NULL) return; nc->ssl_if_data = NULL; MG_FREE(ctx->ssl_cert); MG_FREE(ctx->ssl_key); MG_FREE(ctx->ssl_ca_cert); MG_FREE(ctx->ssl_server_name); memset(ctx, 0, sizeof(*ctx)); MG_FREE(ctx); } bool pem_to_der(const char *pem_file, const char *der_file) { bool ret = false; FILE *pf = NULL, *df = NULL; bool writing = false; pf = fopen(pem_file, "r"); if (pf == NULL) goto clean; remove(der_file); fs_slfs_set_new_file_size(der_file + 3, 2048); df = fopen(der_file, "w"); if (df == NULL) goto clean; while (1) { char pem_buf[70]; char der_buf[48]; if (!fgets(pem_buf, sizeof(pem_buf), pf)) break; if (writing) { if (strstr(pem_buf, "-----END ") != NULL) { ret = true; break; } int l = 0; while (!isspace((unsigned int) pem_buf[l])) l++; int der_len = 0; cs_base64_decode((const unsigned char *) pem_buf, sizeof(pem_buf), der_buf, &der_len); if (der_len <= 0) break; if (fwrite(der_buf, 1, der_len, df) != der_len) break; } else if (strstr(pem_buf, "-----BEGIN ") != NULL) { writing = true; } } clean: if (pf != NULL) fclose(pf); if (df != NULL) { fclose(df); if (!ret) remove(der_file); } return ret; } #if MG_ENABLE_FILESYSTEM && defined(MG_FS_SLFS) /* If the file's extension is .pem, convert it to DER format and put on SLFS. */ static char *sl_pem2der(const char *pem_file) { const char *pem_ext = strstr(pem_file, ".pem"); if (pem_ext == NULL || *(pem_ext + 4) != '\0') { return strdup(pem_file); } char *der_file = NULL; /* DER file must be located on SLFS, add prefix. 
*/ int l = mg_asprintf(&der_file, 0, "SL:%.*s.der", (int) (pem_ext - pem_file), pem_file); if (der_file == NULL) return NULL; bool result = false; cs_stat_t st; if (mg_stat(der_file, &st) != 0) { result = pem_to_der(pem_file, der_file); LOG(LL_DEBUG, ("%s -> %s = %d", pem_file, der_file, result)); } else { /* File exists, assume it's already been converted. */ result = true; } if (result) { /* Strip the SL: prefix we added since NWP does not expect it. */ memmove(der_file, der_file + 3, l - 2 /* including \0 */); } else { MG_FREE(der_file); der_file = NULL; } return der_file; } #else static char *sl_pem2der(const char *pem_file) { return strdup(pem_file); } #endif int sl_set_ssl_opts(struct mg_connection *nc) { int err; struct mg_ssl_if_ctx *ctx = (struct mg_ssl_if_ctx *) nc->ssl_if_data; DBG(("%p ssl ctx: %p", nc, ctx)); if (ctx != NULL) { DBG(("%p %s,%s,%s,%s", nc, (ctx->ssl_cert ? ctx->ssl_cert : "-"), (ctx->ssl_key ? ctx->ssl_cert : "-"), (ctx->ssl_ca_cert ? ctx->ssl_ca_cert : "-"), (ctx->ssl_server_name ? 
ctx->ssl_server_name : "-"))); if (ctx->ssl_cert != NULL && ctx->ssl_key != NULL) { char *ssl_cert = sl_pem2der(ctx->ssl_cert); char *ssl_key = sl_pem2der(ctx->ssl_key); if (ssl_cert != NULL && ssl_key != NULL) { err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET, SL_SO_SECURE_FILES_CERTIFICATE_FILE_NAME, ssl_cert, strlen(ssl_cert)); LOG(LL_INFO, ("CERTIFICATE_FILE_NAME %s -> %d", ssl_cert, err)); err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET, SL_SO_SECURE_FILES_PRIVATE_KEY_FILE_NAME, ssl_key, strlen(ssl_key)); LOG(LL_INFO, ("PRIVATE_KEY_FILE_NAME %s -> %d", ssl_key, err)); } else { err = -1; } MG_FREE(ssl_cert); MG_FREE(ssl_key); if (err != 0) return err; } if (ctx->ssl_ca_cert != NULL) { if (ctx->ssl_ca_cert[0] != '\0') { char *ssl_ca_cert = sl_pem2der(ctx->ssl_ca_cert); if (ssl_ca_cert != NULL) { err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET, SL_SO_SECURE_FILES_CA_FILE_NAME, ssl_ca_cert, strlen(ssl_ca_cert)); LOG(LL_INFO, ("CA_FILE_NAME %s -> %d", ssl_ca_cert, err)); } else { err = -1; } MG_FREE(ssl_ca_cert); if (err != 0) return err; } } if (ctx->ssl_server_name != NULL) { err = sl_SetSockOpt(nc->sock, SL_SOL_SOCKET, SO_SECURE_DOMAIN_NAME_VERIFICATION, ctx->ssl_server_name, strlen(ctx->ssl_server_name)); DBG(("DOMAIN_NAME_VERIFICATION %s -> %d", ctx->ssl_server_name, err)); /* Domain name verificationw as added in a NWP service pack, older * versions return SL_ENOPROTOOPT. There isn't much we can do about it, * so we ignore the error. 
*/ if (err != 0 && err != SL_ENOPROTOOPT) return err; } } return 0; } #endif /* MG_ENABLE_SSL && MG_SSL_IF == MG_SSL_IF_SIMPLELINK */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/lwip/mg_lwip_net_if.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_ #define CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_ #ifndef MG_ENABLE_NET_IF_LWIP_LOW_LEVEL #define MG_ENABLE_NET_IF_LWIP_LOW_LEVEL MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL #endif #if MG_ENABLE_NET_IF_LWIP_LOW_LEVEL #include <stdint.h> extern const struct mg_iface_vtable mg_lwip_iface_vtable; struct mg_lwip_conn_state { struct mg_connection *nc; struct mg_connection *lc; union { struct tcp_pcb *tcp; struct udp_pcb *udp; } pcb; err_t err; size_t num_sent; /* Number of acknowledged bytes to be reported to the core */ struct pbuf *rx_chain; /* Chain of incoming data segments. */ size_t rx_offset; /* Offset within the first pbuf (if partially consumed) */ /* Last SSL write size, for retries. */ int last_ssl_write_size; /* Whether MG_SIG_RECV is already pending for this connection */ int recv_pending; }; enum mg_sig_type { MG_SIG_CONNECT_RESULT = 1, MG_SIG_RECV = 2, MG_SIG_SENT_CB = 3, MG_SIG_CLOSE_CONN = 4, MG_SIG_TOMBSTONE = 5, MG_SIG_ACCEPT = 6, }; void mg_lwip_post_signal(enum mg_sig_type sig, struct mg_connection *nc); /* To be implemented by the platform. 
*/ void mg_lwip_mgr_schedule_poll(struct mg_mgr *mgr); #endif /* MG_ENABLE_NET_IF_LWIP_LOW_LEVEL */ #endif /* CS_COMMON_PLATFORMS_LWIP_MG_NET_IF_LWIP_H_ */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/lwip/mg_lwip_net_if.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_NET_IF_LWIP_LOW_LEVEL /* Amalgamated: #include "common/mg_mem.h" */ #include <lwip/pbuf.h> #include <lwip/tcp.h> #if CS_PLATFORM != CS_P_STM32 #include <lwip/tcp_impl.h> #endif #include <lwip/udp.h> /* Amalgamated: #include "common/cs_dbg.h" */ /* * Depending on whether Mongoose is compiled with ipv6 support, use right * lwip functions */ #if MG_ENABLE_IPV6 #define TCP_NEW tcp_new_ip6 #define TCP_BIND tcp_bind_ip6 #define UDP_BIND udp_bind_ip6 #define IPADDR_NTOA(x) ip6addr_ntoa((const ip6_addr_t *)(x)) #define SET_ADDR(dst, src) \ memcpy((dst)->sin6.sin6_addr.s6_addr, (src)->ip6.addr, \ sizeof((dst)->sin6.sin6_addr.s6_addr)) #else #define TCP_NEW tcp_new #define TCP_BIND tcp_bind #define UDP_BIND udp_bind #define IPADDR_NTOA ipaddr_ntoa #define SET_ADDR(dst, src) (dst)->sin.sin_addr.s_addr = GET_IPV4(src) #endif /* * If lwip is compiled with ipv6 support, then API changes even for ipv4 */ #if !defined(LWIP_IPV6) || !LWIP_IPV6 #define GET_IPV4(ipX_addr) ((ipX_addr)->addr) #else #define GET_IPV4(ipX_addr) ((ipX_addr)->ip4.addr) #endif void mg_lwip_ssl_do_hs(struct mg_connection *nc); void mg_lwip_ssl_send(struct mg_connection *nc); void mg_lwip_ssl_recv(struct mg_connection *nc); void mg_lwip_if_init(struct mg_iface *iface); void mg_lwip_if_free(struct mg_iface *iface); void mg_lwip_if_add_conn(struct mg_connection *nc); void mg_lwip_if_remove_conn(struct mg_connection *nc); time_t mg_lwip_if_poll(struct mg_iface *iface, int timeout_ms); #ifdef RTOS_SDK extern void mgos_lock(); extern void mgos_unlock(); #else #define mgos_lock() #define mgos_unlock() #endif static void mg_lwip_recv_common(struct mg_connection *nc, struct pbuf *p); #if 
LWIP_TCP_KEEPALIVE void mg_lwip_set_keepalive_params(struct mg_connection *nc, int idle, int interval, int count) { if (nc->sock == INVALID_SOCKET || nc->flags & MG_F_UDP) { return; } struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct tcp_pcb *tpcb = cs->pcb.tcp; if (idle > 0 && interval > 0 && count > 0) { tpcb->keep_idle = idle * 1000; tpcb->keep_intvl = interval * 1000; tpcb->keep_cnt = count; tpcb->so_options |= SOF_KEEPALIVE; } else { tpcb->so_options &= ~SOF_KEEPALIVE; } } #elif !defined(MG_NO_LWIP_TCP_KEEPALIVE) #warning LWIP TCP keepalive is disabled. Please consider enabling it. #endif /* LWIP_TCP_KEEPALIVE */ static err_t mg_lwip_tcp_conn_cb(void *arg, struct tcp_pcb *tpcb, err_t err) { struct mg_connection *nc = (struct mg_connection *) arg; DBG(("%p connect to %s:%u = %d", nc, IPADDR_NTOA(ipX_2_ip(&tpcb->remote_ip)), tpcb->remote_port, err)); if (nc == NULL) { tcp_abort(tpcb); return ERR_ARG; } struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; cs->err = err; #if LWIP_TCP_KEEPALIVE if (err == 0) mg_lwip_set_keepalive_params(nc, 60, 10, 6); #endif mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); return ERR_OK; } static void mg_lwip_tcp_error_cb(void *arg, err_t err) { struct mg_connection *nc = (struct mg_connection *) arg; DBG(("%p conn error %d", nc, err)); if (nc == NULL) return; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; cs->pcb.tcp = NULL; /* Has already been deallocated */ if (nc->flags & MG_F_CONNECTING) { cs->err = err; mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); } else { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } } static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) { struct mg_connection *nc = (struct mg_connection *) arg; DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err)); if (p == NULL) { if (nc != NULL) { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } else { /* Tombstoned connection, do nothing. 
*/ } return ERR_OK; } else if (nc == NULL) { tcp_abort(tpcb); return ERR_ARG; } struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* * If we get a chain of more than one segment at once, we need to bump * refcount on the subsequent bufs to make them independent. */ if (p->next != NULL) { struct pbuf *q = p->next; for (; q != NULL; q = q->next) pbuf_ref(q); } if (cs->rx_chain == NULL) { cs->rx_offset = 0; } else if (pbuf_clen(cs->rx_chain) >= 4) { /* ESP SDK has a limited pool of 5 pbufs. We must not hog them all or RX * will be completely blocked. We already have at least 4 in the chain, * this one is, so we have to make a copy and release this one. */ struct pbuf *np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); if (np != NULL) { pbuf_copy(np, p); pbuf_free(p); p = np; } } mg_lwip_recv_common(nc, p); return ERR_OK; } static void mg_lwip_handle_recv_tcp(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; #if MG_ENABLE_SSL if (nc->flags & MG_F_SSL) { if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) { mg_lwip_ssl_recv(nc); } else { mg_lwip_ssl_do_hs(nc); } return; } #endif mgos_lock(); while (cs->rx_chain != NULL) { struct pbuf *seg = cs->rx_chain; size_t len = (seg->len - cs->rx_offset); char *data = (char *) MG_MALLOC(len); if (data == NULL) { mgos_unlock(); DBG(("OOM")); return; } pbuf_copy_partial(seg, data, len, cs->rx_offset); cs->rx_offset += len; if (cs->rx_offset == cs->rx_chain->len) { cs->rx_chain = pbuf_dechain(cs->rx_chain); pbuf_free(seg); cs->rx_offset = 0; } mgos_unlock(); mg_if_recv_tcp_cb(nc, data, len, 1 /* own */); mgos_lock(); } mgos_unlock(); if (nc->send_mbuf.len > 0) { mg_lwip_mgr_schedule_poll(nc->mgr); } } static err_t mg_lwip_tcp_sent_cb(void *arg, struct tcp_pcb *tpcb, u16_t num_sent) { struct mg_connection *nc = (struct mg_connection *) arg; DBG(("%p %p %u", nc, tpcb, num_sent)); if (nc == NULL) { tcp_abort(tpcb); return ERR_ABRT; } struct mg_lwip_conn_state *cs = (struct 
mg_lwip_conn_state *) nc->sock; cs->num_sent += num_sent; mg_lwip_post_signal(MG_SIG_SENT_CB, nc); return ERR_OK; } void mg_lwip_if_connect_tcp(struct mg_connection *nc, const union socket_address *sa) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct tcp_pcb *tpcb = TCP_NEW(); cs->pcb.tcp = tpcb; ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr; u16_t port = ntohs(sa->sin.sin_port); tcp_arg(tpcb, nc); tcp_err(tpcb, mg_lwip_tcp_error_cb); tcp_sent(tpcb, mg_lwip_tcp_sent_cb); tcp_recv(tpcb, mg_lwip_tcp_recv_cb); cs->err = TCP_BIND(tpcb, IP_ADDR_ANY, 0 /* any port */); DBG(("%p tcp_bind = %d", nc, cs->err)); if (cs->err != ERR_OK) { mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); return; } cs->err = tcp_connect(tpcb, ip, port, mg_lwip_tcp_conn_cb); DBG(("%p tcp_connect %p = %d", nc, tpcb, cs->err)); if (cs->err != ERR_OK) { mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); return; } } /* * Lwip included in the SDKs for nRF5x chips has different type for the * callback of `udp_recv()` */ #if CS_PLATFORM == CS_P_NRF51 || CS_PLATFORM == CS_P_NRF52 || \ CS_PLATFORM == CS_P_STM32 static void mg_lwip_udp_recv_cb(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) #else static void mg_lwip_udp_recv_cb(void *arg, struct udp_pcb *pcb, struct pbuf *p, ip_addr_t *addr, u16_t port) #endif { struct mg_connection *nc = (struct mg_connection *) arg; DBG(("%p %s:%u %p %u %u", nc, IPADDR_NTOA(addr), port, p, p->ref, p->len)); /* Put address in a separate pbuf and tack it onto the packet. */ struct pbuf *sap = pbuf_alloc(PBUF_RAW, sizeof(union socket_address), PBUF_RAM); if (sap == NULL) { pbuf_free(p); return; } union socket_address *sa = (union socket_address *) sap->payload; sa->sin.sin_addr.s_addr = addr->addr; sa->sin.sin_port = htons(port); /* Logic in the recv handler requires that there be exactly one data pbuf. 
*/ p = pbuf_coalesce(p, PBUF_RAW); pbuf_chain(sap, p); mg_lwip_recv_common(nc, sap); (void) pcb; } static void mg_lwip_recv_common(struct mg_connection *nc, struct pbuf *p) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; mgos_lock(); if (cs->rx_chain == NULL) { cs->rx_chain = p; } else { pbuf_chain(cs->rx_chain, p); } if (!cs->recv_pending) { cs->recv_pending = 1; mg_lwip_post_signal(MG_SIG_RECV, nc); } mgos_unlock(); } static void mg_lwip_handle_recv_udp(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* * For UDP, RX chain consists of interleaved address and packet bufs: * Address pbuf followed by exactly one data pbuf (recv_cb took care of that). */ while (cs->rx_chain != NULL) { struct pbuf *sap = cs->rx_chain; struct pbuf *p = sap->next; cs->rx_chain = pbuf_dechain(p); size_t data_len = p->len; char *data = (char *) MG_MALLOC(data_len); if (data != NULL) { pbuf_copy_partial(p, data, data_len, 0); pbuf_free(p); mg_if_recv_udp_cb(nc, data, data_len, (union socket_address *) sap->payload, sap->len); pbuf_free(sap); } else { pbuf_free(p); pbuf_free(sap); } } } void mg_lwip_if_connect_udp(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct udp_pcb *upcb = udp_new(); cs->err = UDP_BIND(upcb, IP_ADDR_ANY, 0 /* any port */); DBG(("%p udp_bind %p = %d", nc, upcb, cs->err)); if (cs->err == ERR_OK) { udp_recv(upcb, mg_lwip_udp_recv_cb, nc); cs->pcb.udp = upcb; } else { udp_remove(upcb); } mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); } void mg_lwip_accept_conn(struct mg_connection *nc, struct tcp_pcb *tpcb) { union socket_address sa; SET_ADDR(&sa, &tpcb->remote_ip); sa.sin.sin_port = htons(tpcb->remote_port); mg_if_accept_tcp_cb(nc, &sa, sizeof(sa.sin)); } void mg_lwip_handle_accept(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; #if MG_ENABLE_SSL if (cs->lc->flags & MG_F_SSL) { if 
(mg_ssl_if_conn_accept(nc, cs->lc) != MG_SSL_OK) { LOG(LL_ERROR, ("SSL error")); tcp_close(cs->pcb.tcp); } } else #endif { mg_lwip_accept_conn(nc, cs->pcb.tcp); } } static err_t mg_lwip_accept_cb(void *arg, struct tcp_pcb *newtpcb, err_t err) { struct mg_connection *lc = (struct mg_connection *) arg; DBG(("%p conn %p from %s:%u", lc, newtpcb, IPADDR_NTOA(ipX_2_ip(&newtpcb->remote_ip)), newtpcb->remote_port)); struct mg_connection *nc = mg_if_accept_new_conn(lc); if (nc == NULL) { tcp_abort(newtpcb); return ERR_ABRT; } struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; cs->lc = lc; cs->pcb.tcp = newtpcb; /* We need to set up callbacks before returning because data may start * arriving immediately. */ tcp_arg(newtpcb, nc); tcp_err(newtpcb, mg_lwip_tcp_error_cb); tcp_sent(newtpcb, mg_lwip_tcp_sent_cb); tcp_recv(newtpcb, mg_lwip_tcp_recv_cb); #if LWIP_TCP_KEEPALIVE mg_lwip_set_keepalive_params(nc, 60, 10, 6); #endif mg_lwip_post_signal(MG_SIG_ACCEPT, nc); (void) err; return ERR_OK; } int mg_lwip_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct tcp_pcb *tpcb = TCP_NEW(); ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr; u16_t port = ntohs(sa->sin.sin_port); cs->err = TCP_BIND(tpcb, ip, port); DBG(("%p tcp_bind(%s:%u) = %d", nc, IPADDR_NTOA(ip), port, cs->err)); if (cs->err != ERR_OK) { tcp_close(tpcb); return -1; } tcp_arg(tpcb, nc); tpcb = tcp_listen(tpcb); cs->pcb.tcp = tpcb; tcp_accept(tpcb, mg_lwip_accept_cb); return 0; } int mg_lwip_if_listen_udp(struct mg_connection *nc, union socket_address *sa) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct udp_pcb *upcb = udp_new(); ip_addr_t *ip = (ip_addr_t *) &sa->sin.sin_addr.s_addr; u16_t port = ntohs(sa->sin.sin_port); cs->err = UDP_BIND(upcb, ip, port); DBG(("%p udb_bind(%s:%u) = %d", nc, IPADDR_NTOA(ip), port, cs->err)); if (cs->err != ERR_OK) { udp_remove(upcb); 
return -1; } udp_recv(upcb, mg_lwip_udp_recv_cb, nc); cs->pcb.udp = upcb; return 0; } int mg_lwip_tcp_write(struct mg_connection *nc, const void *data, uint16_t len) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct tcp_pcb *tpcb = cs->pcb.tcp; if (tpcb == NULL) return -1; len = MIN(tpcb->mss, MIN(len, tpcb->snd_buf)); if (len == 0) { DBG(("%p no buf avail %u %u %u %p %p", tpcb, tpcb->acked, tpcb->snd_buf, tpcb->snd_queuelen, tpcb->unsent, tpcb->unacked)); tcp_output(tpcb); return 0; } /* * On ESP8266 we only allow one TCP segment in flight at any given time. * This may increase latency and reduce efficiency of tcp windowing, * but memory is scarce and precious on that platform so we do this to * reduce footprint. */ #if CS_PLATFORM == CS_P_ESP8266 if (tpcb->unacked != NULL) { return 0; } if (tpcb->unsent != NULL) { len = MIN(len, (TCP_MSS - tpcb->unsent->len)); } #endif err_t err = tcp_write(tpcb, data, len, TCP_WRITE_FLAG_COPY); DBG(("%p tcp_write %u = %d", tpcb, len, err)); if (err != ERR_OK) { /* * We ignore ERR_MEM because memory will be freed up when the data is sent * and we'll retry. */ return (err == ERR_MEM ? 
0 : -1); } return len; } static void mg_lwip_send_more(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (nc->sock == INVALID_SOCKET || cs->pcb.tcp == NULL) { DBG(("%p invalid socket", nc)); return; } int num_written = mg_lwip_tcp_write(nc, nc->send_mbuf.buf, nc->send_mbuf.len); DBG(("%p mg_lwip_tcp_write %u = %d", nc, nc->send_mbuf.len, num_written)); if (num_written == 0) return; if (num_written < 0) { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } mbuf_remove(&nc->send_mbuf, num_written); mbuf_trim(&nc->send_mbuf); } void mg_lwip_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) { mbuf_append(&nc->send_mbuf, buf, len); mg_lwip_mgr_schedule_poll(nc->mgr); } void mg_lwip_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (nc->sock == INVALID_SOCKET || cs->pcb.udp == NULL) { /* * In case of UDP, this usually means, what * async DNS resolve is still in progress and connection * is not ready yet */ DBG(("%p socket is not connected", nc)); return; } struct udp_pcb *upcb = cs->pcb.udp; struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM); ip_addr_t *ip = (ip_addr_t *) &nc->sa.sin.sin_addr.s_addr; u16_t port = ntohs(nc->sa.sin.sin_port); if (p == NULL) { DBG(("OOM")); return; } memcpy(p->payload, buf, len); cs->err = udp_sendto(upcb, p, (ip_addr_t *) ip, port); DBG(("%p udp_sendto = %d", nc, cs->err)); pbuf_free(p); if (cs->err != ERR_OK) { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } else { cs->num_sent += len; mg_lwip_post_signal(MG_SIG_SENT_CB, nc); } } void mg_lwip_if_recved(struct mg_connection *nc, size_t len) { if (nc->flags & MG_F_UDP) return; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (nc->sock == INVALID_SOCKET || cs->pcb.tcp == NULL) { DBG(("%p invalid socket", nc)); return; } DBG(("%p %p %u", nc, cs->pcb.tcp, len)); /* Currently SSL acknowledges data 
immediately. * TODO(rojer): Find a way to propagate mg_lwip_if_recved. */ #if MG_ENABLE_SSL if (!(nc->flags & MG_F_SSL)) { tcp_recved(cs->pcb.tcp, len); } #else tcp_recved(cs->pcb.tcp, len); #endif mbuf_trim(&nc->recv_mbuf); } int mg_lwip_if_create_conn(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) MG_CALLOC(1, sizeof(*cs)); if (cs == NULL) return 0; cs->nc = nc; nc->sock = (intptr_t) cs; return 1; } void mg_lwip_if_destroy_conn(struct mg_connection *nc) { if (nc->sock == INVALID_SOCKET) return; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (!(nc->flags & MG_F_UDP)) { struct tcp_pcb *tpcb = cs->pcb.tcp; if (tpcb != NULL) { tcp_arg(tpcb, NULL); DBG(("%p tcp_close %p", nc, tpcb)); tcp_arg(tpcb, NULL); tcp_close(tpcb); } while (cs->rx_chain != NULL) { struct pbuf *seg = cs->rx_chain; cs->rx_chain = pbuf_dechain(cs->rx_chain); pbuf_free(seg); } memset(cs, 0, sizeof(*cs)); MG_FREE(cs); } else if (nc->listener == NULL) { /* Only close outgoing UDP pcb or listeners. 
*/ struct udp_pcb *upcb = cs->pcb.udp; if (upcb != NULL) { DBG(("%p udp_remove %p", nc, upcb)); udp_remove(upcb); } memset(cs, 0, sizeof(*cs)); MG_FREE(cs); } nc->sock = INVALID_SOCKET; } void mg_lwip_if_get_conn_addr(struct mg_connection *nc, int remote, union socket_address *sa) { memset(sa, 0, sizeof(*sa)); if (nc->sock == INVALID_SOCKET) return; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (nc->flags & MG_F_UDP) { struct udp_pcb *upcb = cs->pcb.udp; if (remote) { memcpy(sa, &nc->sa, sizeof(*sa)); } else { sa->sin.sin_port = htons(upcb->local_port); SET_ADDR(sa, &upcb->local_ip); } } else { struct tcp_pcb *tpcb = cs->pcb.tcp; if (remote) { sa->sin.sin_port = htons(tpcb->remote_port); SET_ADDR(sa, &tpcb->remote_ip); } else { sa->sin.sin_port = htons(tpcb->local_port); SET_ADDR(sa, &tpcb->local_ip); } } } void mg_lwip_if_sock_set(struct mg_connection *nc, sock_t sock) { nc->sock = sock; } /* clang-format off */ #define MG_LWIP_IFACE_VTABLE \ { \ mg_lwip_if_init, \ mg_lwip_if_free, \ mg_lwip_if_add_conn, \ mg_lwip_if_remove_conn, \ mg_lwip_if_poll, \ mg_lwip_if_listen_tcp, \ mg_lwip_if_listen_udp, \ mg_lwip_if_connect_tcp, \ mg_lwip_if_connect_udp, \ mg_lwip_if_tcp_send, \ mg_lwip_if_udp_send, \ mg_lwip_if_recved, \ mg_lwip_if_create_conn, \ mg_lwip_if_destroy_conn, \ mg_lwip_if_sock_set, \ mg_lwip_if_get_conn_addr, \ } /* clang-format on */ const struct mg_iface_vtable mg_lwip_iface_vtable = MG_LWIP_IFACE_VTABLE; #if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL const struct mg_iface_vtable mg_default_iface_vtable = MG_LWIP_IFACE_VTABLE; #endif #endif /* MG_ENABLE_NET_IF_LWIP_LOW_LEVEL */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/lwip/mg_lwip_ev_mgr.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL #ifndef MG_SIG_QUEUE_LEN #define MG_SIG_QUEUE_LEN 32 #endif struct mg_ev_mgr_lwip_signal { int sig; struct mg_connection *nc; }; struct mg_ev_mgr_lwip_data { 
struct mg_ev_mgr_lwip_signal sig_queue[MG_SIG_QUEUE_LEN]; int sig_queue_len; int start_index; }; void mg_lwip_post_signal(enum mg_sig_type sig, struct mg_connection *nc) { struct mg_ev_mgr_lwip_data *md = (struct mg_ev_mgr_lwip_data *) nc->iface->data; mgos_lock(); if (md->sig_queue_len >= MG_SIG_QUEUE_LEN) { mgos_unlock(); return; } int end_index = (md->start_index + md->sig_queue_len) % MG_SIG_QUEUE_LEN; md->sig_queue[end_index].sig = sig; md->sig_queue[end_index].nc = nc; md->sig_queue_len++; mg_lwip_mgr_schedule_poll(nc->mgr); mgos_unlock(); } void mg_ev_mgr_lwip_process_signals(struct mg_mgr *mgr) { struct mg_ev_mgr_lwip_data *md = (struct mg_ev_mgr_lwip_data *) mgr->ifaces[MG_MAIN_IFACE]->data; while (md->sig_queue_len > 0) { mgos_lock(); int sig = md->sig_queue[md->start_index].sig; struct mg_connection *nc = md->sig_queue[md->start_index].nc; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; md->start_index = (md->start_index + 1) % MG_SIG_QUEUE_LEN; md->sig_queue_len--; mgos_unlock(); if (nc->iface == NULL || nc->mgr == NULL) continue; switch (sig) { case MG_SIG_CONNECT_RESULT: { #if MG_ENABLE_SSL if (cs->err == 0 && (nc->flags & MG_F_SSL) && !(nc->flags & MG_F_SSL_HANDSHAKE_DONE)) { mg_lwip_ssl_do_hs(nc); } else #endif { mg_if_connect_cb(nc, cs->err); } break; } case MG_SIG_CLOSE_CONN: { nc->flags |= MG_F_CLOSE_IMMEDIATELY; mg_close_conn(nc); break; } case MG_SIG_RECV: { cs->recv_pending = 0; if (nc->flags & MG_F_UDP) { mg_lwip_handle_recv_udp(nc); } else { mg_lwip_handle_recv_tcp(nc); } break; } case MG_SIG_SENT_CB: { if (cs->num_sent > 0) mg_if_sent_cb(nc, cs->num_sent); cs->num_sent = 0; if (nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE) && !(nc->flags & MG_F_WANT_WRITE)) { mg_close_conn(nc); } break; } case MG_SIG_TOMBSTONE: { break; } case MG_SIG_ACCEPT: { mg_lwip_handle_accept(nc); break; } } } } void mg_lwip_if_init(struct mg_iface *iface) { LOG(LL_INFO, ("%p Mongoose init")); iface->data = MG_CALLOC(1, 
sizeof(struct mg_ev_mgr_lwip_data)); } void mg_lwip_if_free(struct mg_iface *iface) { MG_FREE(iface->data); iface->data = NULL; } void mg_lwip_if_add_conn(struct mg_connection *nc) { (void) nc; } void mg_lwip_if_remove_conn(struct mg_connection *nc) { struct mg_ev_mgr_lwip_data *md = (struct mg_ev_mgr_lwip_data *) nc->iface->data; /* Walk the queue and null-out further signals for this conn. */ for (int i = 0; i < MG_SIG_QUEUE_LEN; i++) { if (md->sig_queue[i].nc == nc) { md->sig_queue[i].sig = MG_SIG_TOMBSTONE; } } } time_t mg_lwip_if_poll(struct mg_iface *iface, int timeout_ms) { struct mg_mgr *mgr = iface->mgr; int n = 0; double now = mg_time(); struct mg_connection *nc, *tmp; double min_timer = 0; int num_timers = 0; #if 0 DBG(("begin poll @%u", (unsigned int) (now * 1000))); #endif mg_ev_mgr_lwip_process_signals(mgr); for (nc = mgr->active_connections; nc != NULL; nc = tmp) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; tmp = nc->next; n++; if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) || ((nc->flags & MG_F_SEND_AND_CLOSE) && (nc->flags & MG_F_UDP) && (nc->send_mbuf.len == 0))) { mg_close_conn(nc); continue; } mg_if_poll(nc, now); mg_if_timer(nc, now); #if MG_ENABLE_SSL if ((nc->flags & MG_F_SSL) && cs != NULL && cs->pcb.tcp != NULL && cs->pcb.tcp->state == ESTABLISHED) { if (((nc->flags & MG_F_WANT_WRITE) || ((nc->send_mbuf.len > 0) && (nc->flags & MG_F_SSL_HANDSHAKE_DONE))) && cs->pcb.tcp->snd_buf > 0) { /* Can write more. 
*/ if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) { if (!(nc->flags & MG_F_CONNECTING)) mg_lwip_ssl_send(nc); } else { mg_lwip_ssl_do_hs(nc); } } if (cs->rx_chain != NULL || (nc->flags & MG_F_WANT_READ)) { if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) { if (!(nc->flags & MG_F_CONNECTING)) mg_lwip_ssl_recv(nc); } else { mg_lwip_ssl_do_hs(nc); } } } else #endif /* MG_ENABLE_SSL */ { if (!(nc->flags & (MG_F_CONNECTING | MG_F_UDP))) { if (nc->send_mbuf.len > 0) mg_lwip_send_more(nc); } } if (nc->sock != INVALID_SOCKET && !(nc->flags & (MG_F_UDP | MG_F_LISTENING)) && cs->pcb.tcp != NULL && cs->pcb.tcp->unsent != NULL) { tcp_output(cs->pcb.tcp); } if (nc->ev_timer_time > 0) { if (num_timers == 0 || nc->ev_timer_time < min_timer) { min_timer = nc->ev_timer_time; } num_timers++; } } #if 0 DBG(("end poll @%u, %d conns, %d timers (min %u), next in %d ms", (unsigned int) (now * 1000), n, num_timers, (unsigned int) (min_timer * 1000), timeout_ms)); #endif (void) timeout_ms; return now; } uint32_t mg_lwip_get_poll_delay_ms(struct mg_mgr *mgr) { struct mg_connection *nc; double now = mg_time(); double min_timer = 0; int num_timers = 0; mg_ev_mgr_lwip_process_signals(mgr); for (nc = mg_next(mgr, NULL); nc != NULL; nc = mg_next(mgr, nc)) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; if (nc->ev_timer_time > 0) { if (num_timers == 0 || nc->ev_timer_time < min_timer) { min_timer = nc->ev_timer_time; } num_timers++; } if (nc->send_mbuf.len > 0) { int can_send = 0; /* We have stuff to send, but can we? */ if (nc->flags & MG_F_UDP) { /* UDP is always ready for sending. */ can_send = (cs->pcb.udp != NULL); } else { can_send = (cs->pcb.tcp != NULL && cs->pcb.tcp->snd_buf > 0); } /* We want and can send, request a poll immediately. 
*/ if (can_send) return 0; } } uint32_t timeout_ms = ~0; if (num_timers > 0) { double timer_timeout_ms = (min_timer - now) * 1000 + 1 /* rounding */; if (timer_timeout_ms < timeout_ms) { timeout_ms = timer_timeout_ms; } } return timeout_ms; } #endif /* MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/lwip/mg_lwip_ssl_if.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_SSL && MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL /* Amalgamated: #include "common/mg_mem.h" */ /* Amalgamated: #include "common/cs_dbg.h" */ #include <lwip/pbuf.h> #include <lwip/tcp.h> #ifndef MG_LWIP_SSL_IO_SIZE #define MG_LWIP_SSL_IO_SIZE 1024 #endif /* * Stop processing incoming SSL traffic when recv_mbuf.size is this big. * It'a a uick solution for SSL recv pushback. */ #ifndef MG_LWIP_SSL_RECV_MBUF_LIMIT #define MG_LWIP_SSL_RECV_MBUF_LIMIT 3072 #endif #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif void mg_lwip_ssl_do_hs(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; int server_side = (nc->listener != NULL); enum mg_ssl_if_result res; if (nc->flags & MG_F_CLOSE_IMMEDIATELY) return; res = mg_ssl_if_handshake(nc); DBG(("%p %d %d %d", nc, nc->flags, server_side, res)); if (res != MG_SSL_OK) { if (res == MG_SSL_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; cs->err = 0; } else if (res == MG_SSL_WANT_READ) { /* * Nothing to do in particular, we are callback-driven. * What we definitely do not need anymore is SSL reading (nothing left). */ nc->flags &= ~MG_F_WANT_READ; cs->err = 0; } else { cs->err = res; if (server_side) { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } else { mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); } } } else { cs->err = 0; nc->flags &= ~MG_F_WANT_WRITE; /* * Handshake is done. Schedule a read immediately to consume app data * which may already be waiting. 
*/ nc->flags |= (MG_F_SSL_HANDSHAKE_DONE | MG_F_WANT_READ); if (server_side) { mg_lwip_accept_conn(nc, cs->pcb.tcp); } else { mg_lwip_post_signal(MG_SIG_CONNECT_RESULT, nc); } } } void mg_lwip_ssl_send(struct mg_connection *nc) { if (nc->sock == INVALID_SOCKET) { DBG(("%p invalid socket", nc)); return; } struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* It's ok if the buffer is empty. Return value of 0 may also be valid. */ int len = cs->last_ssl_write_size; if (len == 0) { len = MIN(MG_LWIP_SSL_IO_SIZE, nc->send_mbuf.len); } int ret = mg_ssl_if_write(nc, nc->send_mbuf.buf, len); DBG(("%p SSL_write %u = %d, %d", nc, len, ret)); if (ret > 0) { mbuf_remove(&nc->send_mbuf, ret); mbuf_trim(&nc->send_mbuf); cs->last_ssl_write_size = 0; } else if (ret < 0) { /* This is tricky. We must remember the exact data we were sending to retry * exactly the same send next time. */ cs->last_ssl_write_size = len; } if (ret == len) { nc->flags &= ~MG_F_WANT_WRITE; } else if (ret == MG_SSL_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; } else { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } } void mg_lwip_ssl_recv(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* Don't deliver data before connect callback */ if (nc->flags & MG_F_CONNECTING) return; while (nc->recv_mbuf.len < MG_LWIP_SSL_RECV_MBUF_LIMIT) { char *buf = (char *) MG_MALLOC(MG_LWIP_SSL_IO_SIZE); if (buf == NULL) return; int ret = mg_ssl_if_read(nc, buf, MG_LWIP_SSL_IO_SIZE); DBG(("%p %p SSL_read %u = %d", nc, cs->rx_chain, MG_LWIP_SSL_IO_SIZE, ret)); if (ret <= 0) { MG_FREE(buf); if (ret == MG_SSL_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; return; } else if (ret == MG_SSL_WANT_READ) { /* * Nothing to do in particular, we are callback-driven. * What we definitely do not need anymore is SSL reading (nothing left). 
*/ nc->flags &= ~MG_F_WANT_READ; cs->err = 0; return; } else { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); return; } } else { mg_if_recv_tcp_cb(nc, buf, ret, 1 /* own */); } } } #ifdef KR_VERSION ssize_t kr_send(int fd, const void *buf, size_t len) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) fd; int ret = mg_lwip_tcp_write(cs->nc, buf, len); DBG(("%p mg_lwip_tcp_write %u = %d", cs->nc, len, ret)); if (ret == 0) ret = KR_IO_WOULDBLOCK; return ret; } ssize_t kr_recv(int fd, void *buf, size_t len) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) fd; struct pbuf *seg = cs->rx_chain; if (seg == NULL) { DBG(("%u - nothing to read", len)); return KR_IO_WOULDBLOCK; } size_t seg_len = (seg->len - cs->rx_offset); DBG(("%u %u %u %u", len, cs->rx_chain->len, seg_len, cs->rx_chain->tot_len)); len = MIN(len, seg_len); pbuf_copy_partial(seg, buf, len, cs->rx_offset); cs->rx_offset += len; tcp_recved(cs->pcb.tcp, len); if (cs->rx_offset == cs->rx_chain->len) { cs->rx_chain = pbuf_dechain(cs->rx_chain); pbuf_free(seg); cs->rx_offset = 0; } return len; } #elif MG_SSL_IF == MG_SSL_IF_MBEDTLS int ssl_socket_send(void *ctx, const unsigned char *buf, size_t len) { struct mg_connection *nc = (struct mg_connection *) ctx; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; int ret = mg_lwip_tcp_write(cs->nc, buf, len); LOG(LL_DEBUG, ("%p %d -> %d", nc, len, ret)); if (ret == 0) ret = MBEDTLS_ERR_SSL_WANT_WRITE; return ret; } int ssl_socket_recv(void *ctx, unsigned char *buf, size_t len) { struct mg_connection *nc = (struct mg_connection *) ctx; struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; struct pbuf *seg = cs->rx_chain; if (seg == NULL) { DBG(("%u - nothing to read", len)); return MBEDTLS_ERR_SSL_WANT_READ; } size_t seg_len = (seg->len - cs->rx_offset); DBG(("%u %u %u %u", len, cs->rx_chain->len, seg_len, cs->rx_chain->tot_len)); len = MIN(len, seg_len); pbuf_copy_partial(seg, buf, len, cs->rx_offset); 
cs->rx_offset += len; /* TCP PCB may be NULL if connection has already been closed * but we still have data to deliver to SSL. */ if (cs->pcb.tcp != NULL) tcp_recved(cs->pcb.tcp, len); if (cs->rx_offset == cs->rx_chain->len) { cs->rx_chain = pbuf_dechain(cs->rx_chain); pbuf_free(seg); cs->rx_offset = 0; } LOG(LL_DEBUG, ("%p <- %d", nc, (int) len)); return len; } #endif #endif /* MG_ENABLE_SSL && MG_NET_IF == MG_NET_IF_LWIP_LOW_LEVEL */ #ifdef MG_MODULE_LINES #line 1 "common/platforms/wince/wince_libc.c" #endif /* * Copyright (c) 2016 Cesanta Software Limited * All rights reserved */ #ifdef WINCE const char *strerror(int err) { /* * TODO(alashkin): there is no strerror on WinCE; * look for similar wce_xxxx function */ static char buf[10]; snprintf(buf, sizeof(buf), "%d", err); return buf; } int open(const char *filename, int oflag, int pmode) { /* * TODO(alashkin): mg_open function is not used in mongoose * but exists in documentation as utility function * Shall we delete it at all or implement for WinCE as well? 
*/ DebugBreak(); return 0; /* for compiler */ } int _wstati64(const wchar_t *path, cs_stat_t *st) { DWORD fa = GetFileAttributesW(path); if (fa == INVALID_FILE_ATTRIBUTES) { return -1; } memset(st, 0, sizeof(*st)); if ((fa & FILE_ATTRIBUTE_DIRECTORY) == 0) { HANDLE h; FILETIME ftime; st->st_mode |= _S_IFREG; h = CreateFileW(path, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (h == INVALID_HANDLE_VALUE) { return -1; } st->st_size = GetFileSize(h, NULL); GetFileTime(h, NULL, NULL, &ftime); st->st_mtime = (uint32_t)((((uint64_t) ftime.dwLowDateTime + ((uint64_t) ftime.dwHighDateTime << 32)) / 10000000.0) - 11644473600); CloseHandle(h); } else { st->st_mode |= _S_IFDIR; } return 0; } /* Windows CE doesn't have neither gmtime nor strftime */ static void mg_gmt_time_string(char *buf, size_t buf_len, time_t *t) { FILETIME ft; SYSTEMTIME systime; if (t != NULL) { uint64_t filetime = (*t + 11644473600) * 10000000; ft.dwLowDateTime = filetime & 0xFFFFFFFF; ft.dwHighDateTime = (filetime & 0xFFFFFFFF00000000) >> 32; FileTimeToSystemTime(&ft, &systime); } else { GetSystemTime(&systime); } /* There is no PRIu16 in WinCE SDK */ snprintf(buf, buf_len, "%d.%d.%d %d:%d:%d GMT", (int) systime.wYear, (int) systime.wMonth, (int) systime.wDay, (int) systime.wHour, (int) systime.wMinute, (int) systime.wSecond); } #endif #ifdef MG_MODULE_LINES #line 1 "common/platforms/pic32/pic32_net_if.h" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #ifndef CS_COMMON_PLATFORMS_PIC32_NET_IF_H_ #define CS_COMMON_PLATFORMS_PIC32_NET_IF_H_ /* Amalgamated: #include "mongoose/src/net_if.h" */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #ifndef MG_ENABLE_NET_IF_PIC32 #define MG_ENABLE_NET_IF_PIC32 MG_NET_IF == MG_NET_IF_PIC32 #endif extern const struct mg_iface_vtable mg_pic32_iface_vtable; #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* CS_COMMON_PLATFORMS_PIC32_NET_IF_H_ */ #ifdef MG_MODULE_LINES #line 1 
"common/platforms/pic32/pic32_net_if.c" #endif /* * Copyright (c) 2014-2016 Cesanta Software Limited * All rights reserved */ #if MG_ENABLE_NET_IF_PIC32 int mg_pic32_if_create_conn(struct mg_connection *nc) { (void) nc; return 1; } void mg_pic32_if_recved(struct mg_connection *nc, size_t len) { (void) nc; (void) len; } void mg_pic32_if_add_conn(struct mg_connection *nc) { (void) nc; } void mg_pic32_if_init(struct mg_iface *iface) { (void) iface; (void) mg_get_errno(); /* Shutup compiler */ } void mg_pic32_if_free(struct mg_iface *iface) { (void) iface; } void mg_pic32_if_remove_conn(struct mg_connection *nc) { (void) nc; } void mg_pic32_if_destroy_conn(struct mg_connection *nc) { if (nc->sock == INVALID_SOCKET) return; /* For UDP, only close outgoing sockets or listeners. */ if (!(nc->flags & MG_F_UDP)) { /* Close TCP */ TCPIP_TCP_Close((TCP_SOCKET) nc->sock); } else if (nc->listener == NULL) { /* Only close outgoing UDP or listeners. */ TCPIP_UDP_Close((UDP_SOCKET) nc->sock); } nc->sock = INVALID_SOCKET; } int mg_pic32_if_listen_udp(struct mg_connection *nc, union socket_address *sa) { nc->sock = TCPIP_UDP_ServerOpen( sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4 : IP_ADDRESS_TYPE_IPV6, ntohs(sa->sin.sin_port), sa->sin.sin_addr.s_addr == 0 ? 0 : (IP_MULTI_ADDRESS *) &sa->sin); if (nc->sock == INVALID_SOCKET) { return -1; } return 0; } void mg_pic32_if_udp_send(struct mg_connection *nc, const void *buf, size_t len) { mbuf_append(&nc->send_mbuf, buf, len); } void mg_pic32_if_tcp_send(struct mg_connection *nc, const void *buf, size_t len) { mbuf_append(&nc->send_mbuf, buf, len); } int mg_pic32_if_listen_tcp(struct mg_connection *nc, union socket_address *sa) { nc->sock = TCPIP_TCP_ServerOpen( sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4 : IP_ADDRESS_TYPE_IPV6, ntohs(sa->sin.sin_port), sa->sin.sin_addr.s_addr == 0 ? 
0 : (IP_MULTI_ADDRESS *) &sa->sin); memcpy(&nc->sa, sa, sizeof(*sa)); if (nc->sock == INVALID_SOCKET) { return -1; } return 0; } static int mg_accept_conn(struct mg_connection *lc) { struct mg_connection *nc; TCP_SOCKET_INFO si; union socket_address sa; nc = mg_if_accept_new_conn(lc); if (nc == NULL) { return 0; } nc->sock = lc->sock; nc->flags &= ~MG_F_LISTENING; if (!TCPIP_TCP_SocketInfoGet((TCP_SOCKET) nc->sock, &si)) { return 0; } if (si.addressType == IP_ADDRESS_TYPE_IPV4) { sa.sin.sin_family = AF_INET; sa.sin.sin_port = htons(si.remotePort); sa.sin.sin_addr.s_addr = si.remoteIPaddress.v4Add.Val; } else { /* TODO(alashkin): do something with _potential_ IPv6 */ memset(&sa, 0, sizeof(sa)); } mg_if_accept_tcp_cb(nc, (union socket_address *) &sa, sizeof(sa)); return mg_pic32_if_listen_tcp(lc, &lc->sa) >= 0; } char *inet_ntoa(struct in_addr in) { static char addr[17]; snprintf(addr, sizeof(addr), "%d.%d.%d.%d", (int) in.S_un.S_un_b.s_b1, (int) in.S_un.S_un_b.s_b2, (int) in.S_un.S_un_b.s_b3, (int) in.S_un.S_un_b.s_b4); return addr; } static void mg_handle_send(struct mg_connection *nc) { uint16_t bytes_written = 0; if (nc->flags & MG_F_UDP) { if (!TCPIP_UDP_RemoteBind( (UDP_SOCKET) nc->sock, nc->sa.sin.sin_family == AF_INET ? 
IP_ADDRESS_TYPE_IPV4 : IP_ADDRESS_TYPE_IPV6, ntohs(nc->sa.sin.sin_port), (IP_MULTI_ADDRESS *) &nc->sa.sin)) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; return; } bytes_written = TCPIP_UDP_TxPutIsReady((UDP_SOCKET) nc->sock, 0); if (bytes_written >= nc->send_mbuf.len) { if (TCPIP_UDP_ArrayPut((UDP_SOCKET) nc->sock, (uint8_t *) nc->send_mbuf.buf, nc->send_mbuf.len) != nc->send_mbuf.len) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; bytes_written = 0; } } } else { bytes_written = TCPIP_TCP_FifoTxFreeGet((TCP_SOCKET) nc->sock); if (bytes_written != 0) { if (bytes_written > nc->send_mbuf.len) { bytes_written = nc->send_mbuf.len; } if (TCPIP_TCP_ArrayPut((TCP_SOCKET) nc->sock, (uint8_t *) nc->send_mbuf.buf, bytes_written) != bytes_written) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; bytes_written = 0; } } } if (bytes_written != 0) { mbuf_remove(&nc->send_mbuf, bytes_written); mg_if_sent_cb(nc, bytes_written); } } static void mg_handle_recv(struct mg_connection *nc) { uint16_t bytes_read = 0; uint8_t *buf = NULL; if (nc->flags & MG_F_UDP) { bytes_read = TCPIP_UDP_GetIsReady((UDP_SOCKET) nc->sock); if (bytes_read != 0 && (nc->recv_mbuf_limit == -1 || nc->recv_mbuf.len + bytes_read < nc->recv_mbuf_limit)) { buf = (uint8_t *) MG_MALLOC(bytes_read); if (TCPIP_UDP_ArrayGet((UDP_SOCKET) nc->sock, buf, bytes_read) != bytes_read) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; bytes_read = 0; MG_FREE(buf); } } } else { bytes_read = TCPIP_TCP_GetIsReady((TCP_SOCKET) nc->sock); if (bytes_read != 0) { if (nc->recv_mbuf_limit != -1 && nc->recv_mbuf_limit - nc->recv_mbuf.len > bytes_read) { bytes_read = nc->recv_mbuf_limit - nc->recv_mbuf.len; } buf = (uint8_t *) MG_MALLOC(bytes_read); if (TCPIP_TCP_ArrayGet((TCP_SOCKET) nc->sock, buf, bytes_read) != bytes_read) { nc->flags |= MG_F_CLOSE_IMMEDIATELY; MG_FREE(buf); bytes_read = 0; } } } if (bytes_read != 0) { mg_if_recv_tcp_cb(nc, buf, bytes_read, 1 /* own */); } } time_t mg_pic32_if_poll(struct mg_iface *iface, int timeout_ms) { struct mg_mgr *mgr = 
iface->mgr; double now = mg_time(); struct mg_connection *nc, *tmp; for (nc = mgr->active_connections; nc != NULL; nc = tmp) { tmp = nc->next; if (nc->flags & MG_F_CONNECTING) { /* processing connections */ if (nc->flags & MG_F_UDP || TCPIP_TCP_IsConnected((TCP_SOCKET) nc->sock)) { mg_if_connect_cb(nc, 0); } } else if (nc->flags & MG_F_LISTENING) { if (TCPIP_TCP_IsConnected((TCP_SOCKET) nc->sock)) { /* accept new connections */ mg_accept_conn(nc); } } else { if (nc->send_mbuf.len != 0) { mg_handle_send(nc); } if (nc->recv_mbuf_limit == -1 || nc->recv_mbuf.len < nc->recv_mbuf_limit) { mg_handle_recv(nc); } } } for (nc = mgr->active_connections; nc != NULL; nc = tmp) { tmp = nc->next; if ((nc->flags & MG_F_CLOSE_IMMEDIATELY) || (nc->send_mbuf.len == 0 && (nc->flags & MG_F_SEND_AND_CLOSE))) { mg_close_conn(nc); } } return now; } void mg_pic32_if_sock_set(struct mg_connection *nc, sock_t sock) { nc->sock = sock; } void mg_pic32_if_get_conn_addr(struct mg_connection *nc, int remote, union socket_address *sa) { /* TODO(alaskin): not implemented yet */ } void mg_pic32_if_connect_tcp(struct mg_connection *nc, const union socket_address *sa) { nc->sock = TCPIP_TCP_ClientOpen( sa->sin.sin_family == AF_INET ? IP_ADDRESS_TYPE_IPV4 : IP_ADDRESS_TYPE_IPV6, ntohs(sa->sin.sin_port), (IP_MULTI_ADDRESS *) &sa->sin); nc->err = (nc->sock == INVALID_SOCKET) ? -1 : 0; } void mg_pic32_if_connect_udp(struct mg_connection *nc) { nc->sock = TCPIP_UDP_ClientOpen(IP_ADDRESS_TYPE_ANY, 0, NULL); nc->err = (nc->sock == INVALID_SOCKET) ? 
-1 : 0; } /* clang-format off */ #define MG_PIC32_IFACE_VTABLE \ { \ mg_pic32_if_init, \ mg_pic32_if_free, \ mg_pic32_if_add_conn, \ mg_pic32_if_remove_conn, \ mg_pic32_if_poll, \ mg_pic32_if_listen_tcp, \ mg_pic32_if_listen_udp, \ mg_pic32_if_connect_tcp, \ mg_pic32_if_connect_udp, \ mg_pic32_if_tcp_send, \ mg_pic32_if_udp_send, \ mg_pic32_if_recved, \ mg_pic32_if_create_conn, \ mg_pic32_if_destroy_conn, \ mg_pic32_if_sock_set, \ mg_pic32_if_get_conn_addr, \ } /* clang-format on */ const struct mg_iface_vtable mg_pic32_iface_vtable = MG_PIC32_IFACE_VTABLE; #if MG_NET_IF == MG_NET_IF_PIC32 const struct mg_iface_vtable mg_default_iface_vtable = MG_PIC32_IFACE_VTABLE; #endif #endif /* MG_ENABLE_NET_IF_PIC32 */
./CrossVul/dataset_final_sorted/CWE-416/c/bad_3242_0
crossvul-cpp_data_good_5332_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickPixelPacket target[3], zero; RectangleInfo bounds; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[2]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; 
RectangleInfo bounding_box; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) && (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].red,range) == 
MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (QuantumRange <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). */ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; if ((channel & RedChannel) != 0) { pixel=GetPixelRed(p); 
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & GreenChannel) != 0) { pixel=GetPixelGreen(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & BlueChannel) != 0) { pixel=GetPixelBlue(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { pixel=GetPixelOpacity(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel=GetPixelIndex(indexes+x); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; 
QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse) atDepth=MagickTrue; if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. 
% */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,GetImageType(image)); % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IsMonochromeImage(image,exception) != MagickFalse) return(BilevelType); if (IsGrayImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IsPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const PixelPacket *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse)) type=GrayscaleType; p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->matte != MagickFalse)) type=GrayscaleMatteType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(p) == MagickFalse) { type=UndefinedType; break; } p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G r a y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGrayImage() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsGrayImage method is: % % MagickBooleanType IsGrayImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  /* Consults only the cached image->type; no pixels are examined. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s M o n o c h r o m e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsMonochromeImage method is:
%
%      MagickBooleanType IsMonochromeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /* Consults only the cached image->type; no pixels are examined. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s O p a q u e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
%  an opacity value other than opaque (0).
%
%  The format of the IsOpaqueImage method is:
%
%      MagickBooleanType IsOpaqueImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Without a matte channel every pixel is opaque by definition. */
  if (image->matte == MagickFalse)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        break;
      p++;
    }
    /* An early break from the inner loop means a transparent pixel was
       found (x stops short of image->columns); propagate it outward. */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* y stops short of image->rows only via the breaks above (transparent
     pixel or pixel-cache failure); both report not-opaque. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelDepth() sets the depth of the image.
%
%  The format of the SetImageChannelDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth)
%      MagickBooleanType SetImageChannelDepth(Image *image,
%        const ChannelType channel,const size_t depth)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  /* Convenience wrapper: apply the depth to all composite channels. */
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}

MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* Requesting the full quantum depth (or more) requires no pixel work. */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Quantize the colormap entries to the requested depth by a
        scale-down/scale-up round trip.  NOTE(review): the pragma names
        `status' before it is assigned below; the loop itself neither reads
        nor writes it, so this appears harmless — confirm.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].opacity),range),range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* Precompute the quantized value for every possible sample. */
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      /* Record the new nominal depth only if every row was updated. */
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%        PaletteMatteType, TrueColorType, TrueColorMatteType,
%        ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  /* Honor the image's dither preference (and "dither" artifact, if set)
     for the quantization steps below. */
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      if (SetImageMonochrome(image,&image->exception) == MagickFalse)
        {
          /* Not already monochrome: convert to gray, stretch contrast,
             then quantize down to a 2-color gray palette. */
          status=TransformImageColorspace(image,GRAYColorspace);
          (void) NormalizeImage(image);
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      /* NOTE(review): the colormap is (re)acquired unconditionally here,
         even when the image was already monochrome — confirm intended. */
      status=AcquireImageColormap(image,2);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      if (SetImageGray(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      if (SetImageGray(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      /* Quantize only when the image is not already a small palette. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      /* Threshold the alpha channel to fully opaque/transparent before
         building the palette. */
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          /* Route through sRGB first so the CMYK transform starts from a
             compatible colorspace. */
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            (void) TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            (void) TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Record the requested type only after all conversions succeeded. */
  image->type=type;
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-416/c/good_5332_0
crossvul-cpp_data_good_5171_1
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/php_var.h" #include "ext/standard/php_smart_str.h" #include "zend_interfaces.h" #include "zend_exceptions.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_array.h" #include "spl_exceptions.h" zend_object_handlers spl_handler_ArrayObject; PHPAPI zend_class_entry *spl_ce_ArrayObject; zend_object_handlers spl_handler_ArrayIterator; PHPAPI zend_class_entry *spl_ce_ArrayIterator; PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator; #define SPL_ARRAY_STD_PROP_LIST 0x00000001 #define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002 #define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004 #define SPL_ARRAY_OVERLOADED_REWIND 0x00010000 #define SPL_ARRAY_OVERLOADED_VALID 0x00020000 #define SPL_ARRAY_OVERLOADED_KEY 0x00040000 #define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000 #define SPL_ARRAY_OVERLOADED_NEXT 0x00100000 #define SPL_ARRAY_IS_REF 0x01000000 #define SPL_ARRAY_IS_SELF 0x02000000 
/* Remaining ar_flags bits: storage indirection and internal masks. */
#define SPL_ARRAY_USE_OTHER 0x04000000
#define SPL_ARRAY_INT_MASK 0xFFFF0000
#define SPL_ARRAY_CLONE_MASK 0x0300FFFF

/* Argument-forwarding modes for the proxying array-function methods. */
#define SPL_ARRAY_METHOD_NO_ARG 0
#define SPL_ARRAY_METHOD_USE_ARG 1
#define SPL_ARRAY_METHOD_MAY_USER_ARG 2

/* Per-instance state for ArrayObject / ArrayIterator. */
typedef struct _spl_array_object {
	zend_object std;              /* standard zend object header (must be first) */
	zval *array;                  /* backing storage: array, proxied object, or self */
	zval *retval;                 /* cached return value of overloaded offsetGet() */
	HashPosition pos;             /* current iteration position (bucket pointer) */
	ulong pos_h;                  /* hash of current bucket, used to re-validate pos */
	int ar_flags;                 /* SPL_ARRAY_* flag bits */
	int is_self;
	zend_function *fptr_offset_get;  /* userland overrides of ArrayAccess methods; */
	zend_function *fptr_offset_set;  /* NULL when the method is not overridden     */
	zend_function *fptr_offset_has;
	zend_function *fptr_offset_del;
	zend_function *fptr_count;
	zend_class_entry* ce_get_iterator; /* class returned by getIterator() */
	HashTable *debug_info;        /* lazily built table for var_dump() output */
	unsigned char nApplyCount;    /* recursion guard for get_properties */
} spl_array_object;

/* Resolve the HashTable currently backing the object, following USE_OTHER
 * proxies recursively; IS_SELF means the object's own property table. */
static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */
	if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
		if (!intern->std.properties) {
			rebuild_object_properties(&intern->std);
		}
		return intern->std.properties;
	} else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) {
		spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC);
		return spl_array_get_hash_table(other, check_std_props TSRMLS_CC);
	} else if ((intern->ar_flags & ((check_std_props ? SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) {
		if (!intern->std.properties) {
			rebuild_object_properties(&intern->std);
		}
		return intern->std.properties;
	} else {
		return HASH_OF(intern->array);
	}
} /* }}} */

static void spl_array_rewind(spl_array_object *intern TSRMLS_DC);

/* Refresh the cached bucket hash after pos changed. */
static void spl_array_update_pos(spl_array_object* intern) /* {{{ */
{
	Bucket *pos = intern->pos;
	if (pos != NULL) {
		intern->pos_h = pos->h;
	}
} /* }}} */

/* Set the iteration position and cache its bucket hash. */
static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */
{
	intern->pos = pos;
	spl_array_update_pos(intern);
} /* }}} */

/* Verify that intern->pos still points at a live bucket of ht (the table may
 * have been modified externally); rewinds on failure. */
SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */
{
	Bucket *p;
/*	IS_CONSISTENT(ht);*/
/*	HASH_PROTECT_RECURSION(ht);*/
	/* Walk the collision chain of the cached hash looking for our bucket. */
	p = ht->arBuckets[intern->pos_h & ht->nTableMask];
	while (p != NULL) {
		if (p == intern->pos) {
			return SUCCESS;
		}
		p = p->pNext;
	}
/*	HASH_UNPROTECT_RECURSION(ht); */
	spl_array_rewind(intern TSRMLS_CC);
	return FAILURE;
} /* }}} */

/* Convenience wrapper: verify pos against the current backing table. */
SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */
{
	HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC);
} /* }}} */

/* {{{ spl_array_object_free_storage
 * Object storage destructor: releases the backing array, the cached retval
 * and the lazily allocated debug_info table. */
static void spl_array_object_free_storage(void *object TSRMLS_DC)
{
	spl_array_object *intern = (spl_array_object *)object;

	zend_object_std_dtor(&intern->std TSRMLS_CC);

	zval_ptr_dtor(&intern->array);
	zval_ptr_dtor(&intern->retval);

	if (intern->debug_info != NULL) {
		zend_hash_destroy(intern->debug_info);
		efree(intern->debug_info);
	}

	efree(object);
}
/* }}} */

zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC);

/* {{{ spl_array_object_new_ex
 * Allocate and initialize a new ArrayObject/ArrayIterator instance.
 * orig != NULL: copy (clone_orig) or reference (!clone_orig) another object.
 * Also resolves which handler table applies and caches userland overrides of
 * the ArrayAccess / Iterator / count methods for inherited classes. */
static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC)
{
	zend_object_value retval = {0};
	spl_array_object *intern;
	zval *tmp;
	zend_class_entry * parent = class_type;
	int inherited = 0;

	intern = emalloc(sizeof(spl_array_object));
	memset(intern, 0, sizeof(spl_array_object));
	*obj = intern;
	ALLOC_INIT_ZVAL(intern->retval);

	zend_object_std_init(&intern->std, class_type TSRMLS_CC);
	object_properties_init(&intern->std, class_type);

	intern->ar_flags = 0;
	intern->debug_info = NULL;
	intern->ce_get_iterator = spl_ce_ArrayIterator;
	if (orig) {
		spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC);

		intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK;
		intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK);
		intern->ce_get_iterator = other->ce_get_iterator;
		if (clone_orig) {
			intern->array = other->array;
			if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) {
				/* ArrayObject clones get a deep copy of the backing array. */
				MAKE_STD_ZVAL(intern->array);
				array_init(intern->array);
				zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
			}
			if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) {
				/* ArrayIterator clones share the backing array by refcount. */
				Z_ADDREF_P(other->array);
			}
		} else {
			intern->array = orig;
			Z_ADDREF_P(intern->array);
			intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER;
		}
	} else {
		MAKE_STD_ZVAL(intern->array);
		array_init(intern->array);
		intern->ar_flags &= ~SPL_ARRAY_IS_REF;
	}

	retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC);
	/* Walk up the class hierarchy to decide which handler table applies. */
	while (parent) {
		if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) {
			retval.handlers = &spl_handler_ArrayIterator;
			class_type->get_iterator = spl_array_get_iterator;
			break;
		} else if (parent == spl_ce_ArrayObject) {
			retval.handlers = &spl_handler_ArrayObject;
			break;
		}
		parent = parent->parent;
		inherited = 1;
	}
	if (!parent) { /* this must never happen */
		php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator");
	}
	if (inherited) {
		/* Cache ArrayAccess/count overrides; NULL them when not overridden
		 * (i.e. still declared on the SPL base class itself). */
		zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) &intern->fptr_offset_get);
		if (intern->fptr_offset_get->common.scope == parent) {
			intern->fptr_offset_get = NULL;
		}
		zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set);
		if (intern->fptr_offset_set->common.scope == parent) {
			intern->fptr_offset_set = NULL;
		}
		zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has);
		if (intern->fptr_offset_has->common.scope == parent) {
			intern->fptr_offset_has = NULL;
		}
		zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del);
		if (intern->fptr_offset_del->common.scope == parent) {
			intern->fptr_offset_del = NULL;
		}
		zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count);
		if (intern->fptr_count->common.scope == parent) {
			intern->fptr_count = NULL;
		}
	}
	/* Cache iterator functions if ArrayIterator or derived. Check current's */
	/* cache since only current is always required */
	if (retval.handlers == &spl_handler_ArrayIterator) {
		if (!class_type->iterator_funcs.zf_current) {
			zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind);
			zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid);
			zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key);
			zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current);
			zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next);
		}
		if (inherited) {
			/* Record which Iterator methods are overloaded so the native
			 * iterator handlers know when to dispatch to userland. */
			if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND;
			if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID;
			if (class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY;
			if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT;
			if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT;
		}
	}

	spl_array_rewind(intern TSRMLS_CC);
	return retval;
}
/* }}} */

/* {{{ spl_array_object_new
 * create_object handler: plain construction, no source object. */
static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC)
{
	spl_array_object *tmp;
	return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC);
}
/* }}} */

/* {{{ spl_array_object_clone
 * clone_obj handler: builds a new instance copying from zobject. */
static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC)
{
	zend_object_value new_obj_val;
	zend_object *old_object;
	zend_object *new_object;
	zend_object_handle handle = Z_OBJ_HANDLE_P(zobject);
	spl_array_object *intern;

	old_object = zend_objects_get_address(zobject TSRMLS_CC);
	new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC);
	new_object = &intern->std;

	zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC);

	return new_obj_val;
} /* }}} */

/* Look up (and for write contexts, create) the zval slot for an offset.
 * Write access is refused while the backing table is inside an apply/sort
 * (ht->nApplyCount > 0) to prevent modification during sorting. */
static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zval **retval;
	long index;
	HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (!offset) {
		return &EG(uninitialized_zval_ptr);
	}

	if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) {
		zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
		/* NOTE(review): stray second ';' below — harmless empty statement. */
		return &EG(error_zval_ptr);;
	}

	switch(Z_TYPE_P(offset)) {
	case IS_NULL:
		/* NULL offset is treated as the empty string key. */
		Z_STRVAL_P(offset) = "";
		Z_STRLEN_P(offset) = 0;
		/* fall through */
	case IS_STRING:
		if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &retval) == FAILURE) {
			switch (type) {
				case BP_VAR_R:
					zend_error(E_NOTICE, "Undefined index: %s", Z_STRVAL_P(offset));
					/* fall through */
				case BP_VAR_UNSET:
				case BP_VAR_IS:
					retval = &EG(uninitialized_zval_ptr);
					break;
				case BP_VAR_RW:
					zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
					/* fall through */
				case BP_VAR_W: {
					/* Auto-vivify a NULL entry for write access. */
					zval *value;
					ALLOC_INIT_ZVAL(value);
					zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), (void **)&retval);
				}
			}
		}
		return retval;
	case IS_RESOURCE:
		zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset));
		/* fall through */
	case IS_DOUBLE:
	case IS_BOOL:
	case IS_LONG:
		if (offset->type == IS_DOUBLE) {
			index = (long)Z_DVAL_P(offset);
		} else {
			index = Z_LVAL_P(offset);
		}
		if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) {
			switch (type) {
				case BP_VAR_R:
					zend_error(E_NOTICE, "Undefined offset: %ld", index);
					/* fall through */
				case BP_VAR_UNSET:
				case BP_VAR_IS:
					retval = &EG(uninitialized_zval_ptr);
					break;
				case BP_VAR_RW:
					zend_error(E_NOTICE, "Undefined offset: %ld", index);
					/* fall through */
				case BP_VAR_W: {
					zval *value;
					ALLOC_INIT_ZVAL(value);
					zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval);
				}
			}
		}
		return retval;
	default:
		zend_error(E_WARNING, "Illegal offset type");
		return (type == BP_VAR_W || type == BP_VAR_RW) ? &EG(error_zval_ptr) : &EG(uninitialized_zval_ptr);
	}
} /* }}} */

/* read_dimension backend; dispatches to an overloaded offsetGet() when
 * check_inherited is set and the class overrides it. */
static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
	zval **ret;

	if (check_inherited) {
		spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
		if (intern->fptr_offset_get) {
			zval *rv;
			if (!offset) {
				ALLOC_INIT_ZVAL(offset);
			} else {
				SEPARATE_ARG_IF_REF(offset);
			}
			zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset);
			zval_ptr_dtor(&offset);
			if (rv) {
				/* Keep the userland result alive in intern->retval. */
				zval_ptr_dtor(&intern->retval);
				MAKE_STD_ZVAL(intern->retval);
				ZVAL_ZVAL(intern->retval, rv, 1, 1);
				return intern->retval;
			}
			return EG(uninitialized_zval_ptr);
		}
	}

	ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC);

	/* When in a write context,
	 * ZE has to be fooled into thinking this is in a reference set
	 * by separating (if necessary) and returning as an is_ref=1 zval
	 * (even if refcount == 1) */
	if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) {
		if (Z_REFCOUNT_PP(ret) > 1) {
			zval *newval;

			/* Separate */
			MAKE_STD_ZVAL(newval);
			*newval = **ret;
			zval_copy_ctor(newval);
			Z_SET_REFCOUNT_P(newval, 1);

			/* Replace */
			Z_DELREF_PP(ret);
			*ret = newval;
		}

		Z_SET_ISREF_PP(ret);
	}

	return *ret;
} /* }}} */

/* read_dimension object handler. */
static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
	return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC);
} /* }}} */

/* write_dimension backend; dispatches to overloaded offsetSet() if present.
 * Every storage branch re-checks nApplyCount so writes are rejected while a
 * sort/apply over the backing table is in progress. */
static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long index;
	HashTable *ht;

	if (check_inherited && intern->fptr_offset_set) {
		if (!offset) {
			ALLOC_INIT_ZVAL(offset);
		} else {
			SEPARATE_ARG_IF_REF(offset);
		}
		zend_call_method_with_2_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value);
		zval_ptr_dtor(&offset);
		return;
	}

	if (!offset) {
		/* $obj[] = $value — append. */
		ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
		if (ht->nApplyCount > 0) {
			zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
			return;
		}
		Z_ADDREF_P(value);
		zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL);
		return;
	}
	switch(Z_TYPE_P(offset)) {
		case IS_STRING:
			ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
			if (ht->nApplyCount > 0) {
				zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
				return;
			}
			Z_ADDREF_P(value);
			zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL);
			return;
		case IS_DOUBLE:
		case IS_RESOURCE:
		case IS_BOOL:
		case IS_LONG:
			ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
			if (ht->nApplyCount > 0) {
				zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
				return;
			}
			if (offset->type == IS_DOUBLE) {
				index = (long)Z_DVAL_P(offset);
			} else {
				index = Z_LVAL_P(offset);
			}
			Z_ADDREF_P(value);
			zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL);
			return;
		case IS_NULL:
			/* NULL offset behaves like append here. */
			ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
			if (ht->nApplyCount > 0) {
				zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
				return;
			}
			Z_ADDREF_P(value);
			zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL);
			return;
		default:
			zend_error(E_WARNING, "Illegal offset type");
			return;
	}
} /* }}} */

/* write_dimension object handler. */
static void spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */
{
	spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC);
} /* }}} */

/* unset_dimension backend; dispatches to overloaded offsetUnset() if present.
 * For string keys on the global symbol table it must use
 * zend_delete_global_variable(); for declared properties it also clears the
 * properties_table slot of the owning object. */
static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long index;
	HashTable *ht;

	if (check_inherited && intern->fptr_offset_del) {
		SEPARATE_ARG_IF_REF(offset);
		zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset);
		zval_ptr_dtor(&offset);
		return;
	}

	switch(Z_TYPE_P(offset)) {
	case IS_STRING:
		ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
		if (ht->nApplyCount > 0) {
			zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
			return;
		}
		if (ht == &EG(symbol_table)) {
			if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) {
				zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
			}
		} else {
			if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) {
				zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset));
			} else {
				/* Find the object that actually owns the property table
				 * (follow USE_OTHER proxies to the final target). */
				spl_array_object *obj = intern;

				while (1) {
					if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) {
						break;
					} else if (Z_TYPE_P(obj->array) == IS_OBJECT) {
						if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) {
							obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC);
							break;
						} else {
							obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC);
						}
					} else {
						obj = NULL;
						break;
					}
				}
				if (obj) {
					/* Also clear the slot for declared (non-static) props. */
					zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC);

					if (property_info && (property_info->flags & ZEND_ACC_STATIC) == 0 && property_info->offset >= 0) {
						obj->std.properties_table[property_info->offset] = NULL;
					}
				}
			}
		}
		break;
	case IS_DOUBLE:
	case IS_RESOURCE:
	case IS_BOOL:
	case IS_LONG:
		if (offset->type == IS_DOUBLE) {
			index = (long)Z_DVAL_P(offset);
		} else {
			index = Z_LVAL_P(offset);
		}
		ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
		if (ht->nApplyCount > 0) {
			zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
			return;
		}
		if (zend_hash_index_del(ht, index) == FAILURE) {
			zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset));
		}
		break;
	default:
		zend_error(E_WARNING, "Illegal offset type");
		return;
	}
	spl_hash_verify_pos(intern TSRMLS_CC); /* call rewind on FAILURE */
} /* }}} */

/* unset_dimension object handler. */
static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */
{
	spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC);
} /* }}} */

/* has_dimension backend: isset()/empty()/offsetExists() semantics selected by
 * check_empty (0 = isset, 2 = key existence only, otherwise truthiness). */
static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long index;
	zval *rv, **tmp;

	if (check_inherited && intern->fptr_offset_has) {
		SEPARATE_ARG_IF_REF(offset);
		zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset);
		zval_ptr_dtor(&offset);
		if (rv && zend_is_true(rv)) {
			zval_ptr_dtor(&rv);
			return 1;
		}
		if (rv) {
			zval_ptr_dtor(&rv);
		}
		return 0;
	}

	switch(Z_TYPE_P(offset)) {
	case IS_STRING:
		{
			HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
			if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) {
				switch (check_empty) {
					case 0:
						return Z_TYPE_PP(tmp) != IS_NULL;
					case 2:
						return 1;
					default:
						return zend_is_true(*tmp);
				}
			}
		}
		return 0;
	case IS_DOUBLE:
	case IS_RESOURCE:
	case IS_BOOL:
	case IS_LONG:
		{
			HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
			if (offset->type == IS_DOUBLE) {
				index = (long)Z_DVAL_P(offset);
			} else {
				index = Z_LVAL_P(offset);
			}
			if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) {
				switch (check_empty) {
					case 0:
						return Z_TYPE_PP(tmp) != IS_NULL;
					case 2:
						return 1;
					default:
						return zend_is_true(*tmp);
				}
			}
			return 0;
		}
	default:
		zend_error(E_WARNING, "Illegal offset type");
	}
	return 0;
} /* }}} */

/* has_dimension object handler. */
static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
	return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC);
} /* }}} */

/* {{{ spl_array_object_verify_pos_ex
 * Validate backing table and iteration position, emitting a prefixed notice
 * on failure. (Body continues on the following chunk line.) */
static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC)
{
	if
	(!ht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix);
		return FAILURE;
	}
	/* Only referenced (IS_REF) storage can be mutated behind our back. */
	if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix);
		return FAILURE;
	}

	return SUCCESS;
} /* }}} */

/* {{{ spl_array_object_verify_pos
 * Position check with an empty message prefix. */
static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC)
{
	return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC);
} /* }}} */

/* {{{ proto bool ArrayObject::offsetExists(mixed $index)
       proto bool ArrayIterator::offsetExists(mixed $index)
   Returns whether the requested $index exists. */
SPL_METHOD(Array, offsetExists)
{
	zval *index;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
		return;
	}
	/* check_inherited=0: never re-enter a userland offsetExists override. */
	RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC));
} /* }}} */

/* {{{ proto mixed ArrayObject::offsetGet(mixed $index)
       proto mixed ArrayIterator::offsetGet(mixed $index)
   Returns the value at the specified $index. */
SPL_METHOD(Array, offsetGet)
{
	zval *index, *value;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
		return;
	}
	value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC);
	RETURN_ZVAL(value, 1, 0);
} /* }}} */

/* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval)
       proto void ArrayIterator::offsetSet(mixed $index, mixed $newval)
   Sets the value at the specified $index to $newval. */
SPL_METHOD(Array, offsetSet)
{
	zval *index, *value;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) {
		return;
	}
	spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC);
} /* }}} */

/* Append a value to the backing array (refused for object storage) and, if
 * the iterator had run off the end, point it at the new last element. */
void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name);
		return;
	}

	spl_array_write_dimension(object, NULL, append_value TSRMLS_CC);
	if (!intern->pos) {
		spl_array_set_pos(intern, aht->pListTail);
	}
} /* }}} */

/* {{{ proto void ArrayObject::append(mixed $newval)
       proto void ArrayIterator::append(mixed $newval)
   Appends the value (cannot be called for objects). */
SPL_METHOD(Array, append)
{
	zval *value;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) {
		return;
	}
	spl_array_iterator_append(getThis(), value TSRMLS_CC);
} /* }}} */

/* {{{ proto void ArrayObject::offsetUnset(mixed $index)
       proto void ArrayIterator::offsetUnset(mixed $index)
   Unsets the value at the specified $index. */
SPL_METHOD(Array, offsetUnset)
{
	zval *index;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) {
		return;
	}
	spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC);
} /* }}} */

/* {{{ proto array ArrayObject::getArrayCopy()
       proto array ArrayIterator::getArrayCopy()
   Return a copy of the contained array */
SPL_METHOD(Array, getArrayCopy)
{
	zval *object = getThis(), *tmp;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	array_init(return_value);
	zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
} /* }}} */

/* get_properties handler; nApplyCount guards against recursive proxying
 * (e.g. an ArrayObject ultimately backed by itself). */
static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *result;

	if (intern->nApplyCount > 1) {
		php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?");
	}
	intern->nApplyCount++;
	result = spl_array_get_hash_table(intern, 1 TSRMLS_CC);
	intern->nApplyCount--;
	return result;
} /* }}} */

/* get_debug_info handler: builds (and caches in intern->debug_info) the
 * var_dump() view — declared properties plus a private "storage" entry
 * referencing the backing array. */
static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC);
	zval *tmp, *storage;
	int name_len;
	char *zname;
	zend_class_entry *base;

	*is_temp = 0;

	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}

	if (HASH_OF(intern->array) == intern->std.properties) {
		return intern->std.properties;
	} else {
		if (intern->debug_info == NULL) {
			ALLOC_HASHTABLE(intern->debug_info);
			ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0);
		}

		/* Only rebuild when the table is not currently being iterated. */
		if (intern->debug_info->nApplyCount == 0) {
			zend_hash_clean(intern->debug_info);
			zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *));

			storage = intern->array;
			zval_add_ref(&storage);

			base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject;
			zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC);
			zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL);
			efree(zname);
		}

		return intern->debug_info;
	}
} /* }}} */

/* get_gc handler: expose the backing array zval to the cycle collector. */
static HashTable *spl_array_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	*gc_data = &intern->array;
	*gc_data_count = 1;
	return zend_std_get_properties(object);
} /* }}} */

/* Property handlers below: when ARRAY_AS_PROPS is set and the name is not a
 * real declared/dynamic property, property access maps onto the array. */

static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
		return spl_array_read_dimension(object, member, type TSRMLS_CC);
	}
	return std_object_handlers.read_property(object, member, type, key TSRMLS_CC);
} /* }}} */

static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
		spl_array_write_dimension(object, member, value TSRMLS_CC);
		return;
	}
	std_object_handlers.write_property(object, member, value, key TSRMLS_CC);
} /* }}} */

static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
		return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC);
	}
	return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC);
} /* }}} */

static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
		return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC);
	}
	return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC);
} /* }}} */

static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) {
		spl_array_unset_dimension(object, member TSRMLS_CC);
		spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */
		return;
	}
	std_object_handlers.unset_property(object, member, key TSRMLS_CC);
} /* }}} */

/* compare_objects handler: compare backing tables first; fall back to the
 * standard object comparison when equal (body continues on next chunk line). */
static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
	HashTable *ht1, *ht2;
	spl_array_object *intern1, *intern2;
	int result = 0;
	zval temp_zv;

	intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC);
	intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC);
	ht1 = spl_array_get_hash_table(intern1, 0 TSRMLS_CC);
	ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC);
	zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC);
	result = (int)Z_LVAL(temp_zv);
	/* if we just compared std.properties, don't do it again */
	if (result == 0 && !(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) {
		result = std_object_handlers.compare_objects(o1, o2 TSRMLS_CC);
	}
	return
	result;
} /* }}} */

/* When iterating an object's property table, advance past protected/private
 * entries (mangled keys start with a NUL byte). */
static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	char *string_key;
	uint string_length;
	ulong num_key;

	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		do {
			if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) {
				/* zend_hash_get_current_key_ex() should never set
				 * string_length to 0 when returning HASH_KEY_IS_STRING, but we
				 * may as well be defensive and consider that successful.
				 * Beyond that, we're looking for protected keys (which will
				 * have a null byte at string_key[0]), but want to avoid
				 * skipping completely empty keys (which will also have the
				 * null byte, but a string_length of 1). */
				if (!string_length || string_key[0] || string_length == 1) {
					return SUCCESS;
				}
			} else {
				return SUCCESS;
			}
			if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) {
				return FAILURE;
			}
			zend_hash_move_forward_ex(aht, &intern->pos);
			spl_array_update_pos(intern);
		} while (1);
	}
	return FAILURE;
} /* }}} */

/* Advance the position without validating it first. */
static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	zend_hash_move_forward_ex(aht, &intern->pos);
	spl_array_update_pos(intern);
	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		return spl_array_skip_protected(intern, aht TSRMLS_CC);
	} else {
		return zend_hash_has_more_elements_ex(aht, &intern->pos);
	}
} /* }}} */

/* Advance the position after verifying it (referenced storage only). */
static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid");
		return FAILURE;
	}

	return spl_array_next_no_verify(intern, aht TSRMLS_CC);
} /* }}} */

/* next() against the current backing table. */
static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	return spl_array_next_ex(intern, aht TSRMLS_CC);
} /* }}} */

/* define an overloaded iterator structure */
typedef struct {
	zend_user_iterator intern;   /* base user-iterator (holds the object zval) */
	spl_array_object *object;    /* shortcut to the ArrayIterator internals */
} spl_array_it;

/* Iterator dtor: release the iterated object. */
static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;

	zend_user_it_invalidate_current(iter TSRMLS_CC);
	zval_ptr_dtor((zval**)&iterator->intern.it.data);

	efree(iterator);
} /* }}} */

/* Iterator valid(): delegate to userland override when OVERLOADED_VALID. */
static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) {
		return zend_user_it_valid(iter TSRMLS_CC);
	} else {
		if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) {
			return FAILURE;
		}

		return zend_hash_has_more_elements_ex(aht, &object->pos);
	}
} /* }}} */

/* Iterator current(): fetch the zval at the current position. */
static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) {
		zend_user_it_get_current_data(iter, data TSRMLS_CC);
	} else {
		if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) {
			*data = NULL;
		}
	}
} /* }}} */

/* Iterator key(): current key, or NULL when the position is invalid. */
static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) {
		zend_user_it_get_current_key(iter, key TSRMLS_CC);
	} else {
		if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) {
			ZVAL_NULL(key);
		} else {
			zend_hash_get_current_key_zval_ex(aht, key, &object->pos);
		}
	}
} /* }}} */

/* Iterator next(): validate position first when storage is referenced.
 * NOTE(review): the !aht notice below says "ArrayIterator::current():"
 * although this is the next() handler — looks like a copied message; verify
 * against upstream before changing the string. */
static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) {
		zend_user_it_move_forward(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		if (!aht) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array");
			return;
		}

		if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid");
		} else {
			spl_array_next_no_verify(object, aht TSRMLS_CC);
		}
	}
} /* }}} */

/* Reset position to the first visible element of aht. */
static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	zend_hash_internal_pointer_reset_ex(aht, &intern->pos);
	spl_array_update_pos(intern);
	spl_array_skip_protected(intern, aht TSRMLS_CC);
} /* }}} */

/* rewind() against the current backing table. */
static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array");
		return;
	}

	spl_array_rewind_ex(intern, aht TSRMLS_CC);
} /* }}} */

/* Iterator rewind(): delegate to userland override when OVERLOADED_REWIND. */
static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) {
		zend_user_it_rewind(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		spl_array_rewind(object TSRMLS_CC);
	}
} /* }}} */

/* {{{ spl_array_set_array
 * Install *array as the object's backing storage. Arrays are separated
 * (copy-on-write break) unless passed by reference; other ArrayObject /
 * ArrayIterator instances are proxied via USE_OTHER; anything else must be
 * an object with a standard property table or an exception is thrown. */
static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array TSRMLS_DC)
{
	if (Z_TYPE_PP(array) == IS_ARRAY) {
		SEPARATE_ZVAL_IF_NOT_REF(array);
	}

	if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) {
		zval_ptr_dtor(&intern->array);
		if (just_array) {
			spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC);
			ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK;
		}
		ar_flags |= SPL_ARRAY_USE_OTHER;
		intern->array = *array;
	} else {
		if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) {
			zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC);
			return;
		}
		zval_ptr_dtor(&intern->array);
		intern->array = *array;
	}
	if (object == *array) {
		intern->ar_flags |= SPL_ARRAY_IS_SELF;
		intern->ar_flags &= ~SPL_ARRAY_USE_OTHER;
	} else {
		intern->ar_flags &= ~SPL_ARRAY_IS_SELF;
	}
	intern->ar_flags |= ar_flags;
	Z_ADDREF_P(intern->array);
	if (Z_TYPE_PP(array) == IS_OBJECT) {
		/* Objects with overloaded get_properties cannot be proxied. */
		zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties);
		if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties) || !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) {
			zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name);
		}
	}

	spl_array_rewind(intern TSRMLS_CC);
} /* }}} */

/* iterator handler table */
zend_object_iterator_funcs spl_array_it_funcs = {
	spl_array_it_dtor,
	spl_array_it_valid,
	spl_array_it_get_current_data,
	spl_array_it_get_current_key,
	spl_array_it_move_forward,
	spl_array_it_rewind
};

/* get_iterator handler: wrap the object in an spl_array_it. Foreach by
 * reference is only refused when current() is overloaded in userland. */
zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator;
	spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (by_ref && (array_object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT)) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}

	iterator = emalloc(sizeof(spl_array_it));

	Z_ADDREF_P(object);
	iterator->intern.it.data = (void*)object;
	iterator->intern.it.funcs = &spl_array_it_funcs;
	iterator->intern.ce = ce;
	iterator->intern.value = NULL;
	iterator->object = array_object;

	return (zend_object_iterator*)iterator;
} /* }}} */

/* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]])
       proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0])
   Constructs a new array iterator from a path. */
SPL_METHOD(Array, __construct)
{
	zval *object = getThis();
	spl_array_object *intern;
	zval **array;
	long ar_flags = 0;
	zend_class_entry *ce_get_iterator = spl_ce_Iterator;
	zend_error_handling error_handling;

	if (ZEND_NUM_ARGS() == 0) {
		return; /* nothing to do */
	}

	/* Argument errors are converted to InvalidArgumentException. */
	zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC);

	intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (ZEND_NUM_ARGS() > 2) {
		intern->ce_get_iterator = ce_get_iterator;
	}

	/* User-supplied flags may not touch the internal bits. */
	ar_flags &= ~SPL_ARRAY_INT_MASK;

	spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC);

	zend_restore_error_handling(&error_handling TSRMLS_CC);
} /* }}} */

/* {{{ proto void ArrayObject::setIteratorClass(string iterator_class)
   Set the class used in getIterator.
 */
/* Store the class entry later instantiated by ArrayObject::getIterator(). */
SPL_METHOD(Array, setIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zend_class_entry * ce_get_iterator = spl_ce_Iterator;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) {
		return;
	}

	intern->ce_get_iterator = ce_get_iterator;
}
/* }}} */

/* {{{ proto string ArrayObject::getIteratorClass()
   Get the class used in getIterator. */
SPL_METHOD(Array, getIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_STRING(intern->ce_get_iterator->name, 1);
}
/* }}} */

/* {{{ proto int ArrayObject::getFlags()
   Get flags */
SPL_METHOD(Array, getFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* internal bookkeeping bits are never exposed to userland */
	RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto void ArrayObject::setFlags(int flags)
   Set flags */
SPL_METHOD(Array, setFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long ar_flags = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) {
		return;
	}

	/* keep the internal bits, replace only the public ones */
	intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array())
   Replace the referenced array or object with a new one and return the old one (right now copy - to be changed) */
SPL_METHOD(Array, exchangeArray)
{
	zval *object = getThis(), *tmp, **array;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	/* return value is built (a copy of the current storage) before the
	 * arguments are parsed, so the old contents survive the exchange */
	array_init(return_value);
	zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) {
		return;
	}

	spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC);
}
/* }}} */

/* {{{ proto ArrayIterator ArrayObject::getIterator()
   Create a new iterator from a ArrayObject instance */
SPL_METHOD(Array, getIterator)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	spl_array_object *iterator;
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	/* instantiate intern->ce_get_iterator sharing this object's storage */
	return_value->type = IS_OBJECT;
	return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC);
	Z_SET_REFCOUNT_P(return_value, 1);
	Z_SET_ISREF_P(return_value);
}
/* }}} */

/* {{{ proto void ArrayIterator::rewind()
   Rewind array back to the start */
SPL_METHOD(Array, rewind)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */

/* {{{ proto void ArrayIterator::seek(int $position)
   Seek to position. */
SPL_METHOD(Array, seek)
{
	long opos, position;
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	int result;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	opos = position; /* keep the original for the exception message */

	if (position >= 0) { /* negative values are not supported */
		/* seeking is linear: rewind, then advance 'position' times */
		spl_array_rewind(intern TSRMLS_CC);
		result = SUCCESS;

		while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS);

		if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) {
			return; /* ok */
		}
	}
	zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos);
}
/* }}} */

/* Count the elements of the backing storage.  For object-backed storage
 * elements are counted by iterating (so protected buckets are skipped and
 * the saved position is restored afterwards); for plain arrays the hash's
 * element count is used directly.  Returns SUCCESS/FAILURE. */
int static spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	HashPosition pos;

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		*count = 0;
		return FAILURE;
	}

	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		/* We need to store the 'pos' since we'll modify it in the functions
		 * we're going to call and which do not support 'pos' as parameter. */
		pos = intern->pos;
		*count = 0;
		spl_array_rewind(intern TSRMLS_CC);
		while(intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) {
			(*count)++;
		}
		spl_array_set_pos(intern, pos);
		return SUCCESS;
	} else {
		*count = zend_hash_num_elements(aht);
		return SUCCESS;
	}
}
/* }}} */

/* count_elements object handler used by count($obj): prefers a userland
 * count() override (fptr_count) and falls back to the native helper. */
int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (intern->fptr_count) {
		zval *rv;
		zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv);
		if (rv) {
			/* intern->retval caches the converted result and owns rv */
			zval_ptr_dtor(&intern->retval);
			MAKE_STD_ZVAL(intern->retval);
			ZVAL_ZVAL(intern->retval, rv, 1, 1);
			convert_to_long(intern->retval);
			*count = (long) Z_LVAL_P(intern->retval);
			return SUCCESS;
		}
		*count = 0;
		return FAILURE;
	}
	return spl_array_object_count_elements_helper(intern, count TSRMLS_CC);
}
/* }}} */

/* {{{ proto int ArrayObject::count()
       proto int ArrayIterator::count()
   Return the number of elements in the Iterator.
*/ SPL_METHOD(Array, count) { long count; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_object_count_elements_helper(intern, &count TSRMLS_CC); RETURN_LONG(count); } /* }}} */ static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval *tmp, *arg = NULL; zval *retval_ptr = NULL; MAKE_STD_ZVAL(tmp); Z_TYPE_P(tmp) = IS_ARRAY; Z_ARRVAL_P(tmp) = aht; if (!use_arg) { aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC); aht->nApplyCount--; } else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) { if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg? 
2 : 1, tmp, arg TSRMLS_CC); aht->nApplyCount--; } else { if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC); aht->nApplyCount--; } Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */ zval_ptr_dtor(&tmp); if (retval_ptr) { COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr); } } /* }}} */ #define SPL_ARRAY_METHOD(cname, fname, use_arg) \ SPL_METHOD(cname, fname) \ { \ spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \ } /* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ]) Sort the entries by values. */ SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ]) Sort the entries by key. */ SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::uasort(callback cmp_function) proto int ArrayIterator::uasort(callback cmp_function) Sort the entries by values user defined function. */ SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::uksort(callback cmp_function) proto int ArrayIterator::uksort(callback cmp_function) Sort the entries by key using user defined function. */ SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::natsort() proto int ArrayIterator::natsort() Sort the entries by values using "natural order" algorithm. 
*/ SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto int ArrayObject::natcasesort() proto int ArrayIterator::natcasesort() Sort the entries by key using case insensitive "natural order" algorithm. */ SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::current() Return current array entry */ SPL_METHOD(Array, current) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **entry; HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } RETVAL_ZVAL(*entry, 1, 0); } /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::key() Return current array key */ SPL_METHOD(Array, key) { if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_iterator_key(getThis(), return_value TSRMLS_CC); } /* }}} */ void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos); } /* }}} */ /* {{{ proto void ArrayIterator::next() Move to next entry */ SPL_METHOD(Array, next) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ /* {{{ proto bool 
ArrayIterator::valid() Check whether array contains more entries */ SPL_METHOD(Array, valid) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } else { RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS); } } /* }}} */ /* {{{ proto bool RecursiveArrayIterator::hasChildren() Check whether current element has children (e.g. is an array) */ SPL_METHOD(Array, hasChildren) { zval *object = getThis(), **entry; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { RETURN_FALSE; } RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0)); } /* }}} */ /* {{{ proto object RecursiveArrayIterator::getChildren() Create a sub iterator for the current element (same class as $this) */ SPL_METHOD(Array, getChildren) { zval *object = getThis(), **entry, *flags; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } if (Z_TYPE_PP(entry) == IS_OBJECT) { if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) { return; } if (instanceof_function(Z_OBJCE_PP(entry), 
Z_OBJCE_P(getThis()) TSRMLS_CC)) { RETURN_ZVAL(*entry, 1, 0); } } MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags); spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC); zval_ptr_dtor(&flags); } /* }}} */ /* {{{ proto string ArrayObject::serialize() Serialize the object */ SPL_METHOD(Array, serialize) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval members, *pmembers; php_serialize_data_t var_hash; smart_str buf = {0}; zval *flags; if (zend_parse_parameters_none() == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } PHP_VAR_SERIALIZE_INIT(var_hash); MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK)); /* storage */ smart_str_appendl(&buf, "x:", 2); php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC); zval_ptr_dtor(&flags); if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) { php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC); smart_str_appendc(&buf, ';'); } /* members */ smart_str_appendl(&buf, "m:", 2); INIT_PZVAL(&members); if (!intern->std.properties) { rebuild_object_properties(&intern->std); } Z_ARRVAL(members) = intern->std.properties; Z_TYPE(members) = IS_ARRAY; pmembers = &members; php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */ /* done */ PHP_VAR_SERIALIZE_DESTROY(var_hash); if (buf.c) { RETURN_STRINGL(buf.c, buf.len, 0); } RETURN_NULL(); } /* }}} */ /* {{{ proto void ArrayObject::unserialize(string serialized) * unserialize the object */ SPL_METHOD(Array, unserialize) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *buf; int buf_len; const unsigned char *p, *s; php_unserialize_data_t var_hash; zval *pmembers, *pflags = NULL; 
HashTable *aht; long flags; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) { return; } if (buf_len == 0) { zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Empty serialized string cannot be empty"); return; } aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (aht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } /* storage */ s = p = (const unsigned char*)buf; PHP_VAR_UNSERIALIZE_INIT(var_hash); if (*p!= 'x' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pflags); if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) { goto outexcept; } var_push_dtor(&var_hash, &pflags); --p; /* for ';' */ flags = Z_LVAL_P(pflags); /* flags needs to be verified and we also need to verify whether the next * thing we get is ';'. After that we require an 'm' or somethign else * where 'm' stands for members and anything else should be an array. If * neither 'a' or 'm' follows we have an error. 
*/ if (*p != ';') { goto outexcept; } ++p; if (*p!='m') { if (*p!='a' && *p!='O' && *p!='C' && *p!='r') { goto outexcept; } intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK; intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK; zval_ptr_dtor(&intern->array); ALLOC_INIT_ZVAL(intern->array); if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) { goto outexcept; } var_push_dtor(&var_hash, &intern->array); } if (*p != ';') { goto outexcept; } ++p; /* members */ if (*p!= 'm' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pmembers); if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) { zval_ptr_dtor(&pmembers); goto outexcept; } var_push_dtor(&var_hash, &pmembers); /* copy members */ if (!intern->std.properties) { rebuild_object_properties(&intern->std); } zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *)); zval_ptr_dtor(&pmembers); /* done reading $serialized */ PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } return; outexcept: PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len); return; } /* }}} */ /* {{{ arginfo and function tbale */ ZEND_BEGIN_ARG_INFO(arginfo_array___construct, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2) ZEND_ARG_INFO(0, index) ZEND_ARG_INFO(0, newval) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0) ZEND_ARG_INFO(0, position) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() 
ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0)
	ZEND_ARG_INFO(0, iteratorClass)
ZEND_END_ARG_INFO()

/* shared by uasort()/uksort(): one callback argument */
ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0)
	ZEND_ARG_INFO(0, cmp_function)
ZEND_END_ARG_INFO();

ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0)
	ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO();

/* shared by all zero-argument methods */
ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0)
ZEND_END_ARG_INFO()

/* Method table for ArrayObject.  All entries map onto the shared
 * SPL_METHOD(Array, ...) implementations above. */
static const zend_function_entry spl_funcs_ArrayObject[] = {
	SPL_ME(Array, __construct,      arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,        arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,        arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,      arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,           arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,     arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,         arginfo_array_setFlags,         ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,      arginfo_array_unserialize,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,        arginfo_array_void,             ZEND_ACC_PUBLIC)
	/* ArrayObject specific */
	SPL_ME(Array, getIterator,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, exchangeArray,    arginfo_array_exchangeArray,    ZEND_ACC_PUBLIC)
	SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC)
	SPL_ME(Array, getIteratorClass, arginfo_array_void,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* Method table for ArrayIterator: shares the common methods and adds the
 * Iterator/SeekableIterator interface methods. */
static const zend_function_entry spl_funcs_ArrayIterator[] = {
	SPL_ME(Array, __construct,      arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,        arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,        arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,      arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,           arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,     arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,         arginfo_array_setFlags,         ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,      arginfo_array_unserialize,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,        arginfo_array_void,             ZEND_ACC_PUBLIC)
	/* ArrayIterator specific */
	SPL_ME(Array, rewind,           arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, current,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, key,              arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, next,             arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, valid,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, seek,             arginfo_array_seek,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};

/* RecursiveArrayIterator only adds the RecursiveIterator pair; everything
 * else is inherited from ArrayIterator. */
static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = {
	SPL_ME(Array, hasChildren,   arginfo_array_void, ZEND_ACC_PUBLIC)
	SPL_ME(Array, getChildren,   arginfo_array_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* }}} */

/* {{{ PHP_MINIT_FUNCTION(spl_array)
 * Registers ArrayObject, ArrayIterator and RecursiveArrayIterator, installs
 * the custom object handlers, and defines the class constants.  Registration
 * order matters: ArrayIterator copies ArrayObject's handler table, and
 * RecursiveArrayIterator subclasses ArrayIterator. */
PHP_MINIT_FUNCTION(spl_array)
{
	REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable);
	memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers));

	/* dimension handlers route $obj[...] through the backing storage */
	spl_handler_ArrayObject.clone_obj = spl_array_object_clone;
	spl_handler_ArrayObject.read_dimension = spl_array_read_dimension;
	spl_handler_ArrayObject.write_dimension = spl_array_write_dimension;
	spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension;
	spl_handler_ArrayObject.has_dimension = spl_array_has_dimension;
	spl_handler_ArrayObject.count_elements = spl_array_object_count_elements;

	/* property handlers implement STD_PROP_LIST / ARRAY_AS_PROPS behaviour */
	spl_handler_ArrayObject.get_properties = spl_array_get_properties;
	spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info;
	spl_handler_ArrayObject.get_gc = spl_array_get_gc;
	spl_handler_ArrayObject.read_property = spl_array_read_property;
	spl_handler_ArrayObject.write_property = spl_array_write_property;
	spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr;
	spl_handler_ArrayObject.has_property = spl_array_has_property;
	spl_handler_ArrayObject.unset_property = spl_array_unset_property;

	spl_handler_ArrayObject.compare_objects = spl_array_compare_objects;

	REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable);
	memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers));
	spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator);
	REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator);
	spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "STD_PROP_LIST",  SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST",  SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY);

	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: fdm=marker
 * vim: noet sw=4 ts=4
 */
./CrossVul/dataset_final_sorted/CWE-416/c/good_5171_1
crossvul-cpp_data_bad_903_0
/*
 irc-core.c : irssi

    Copyright (C) 1999-2000 Timo Sirainen

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
*/

#include "module.h"
#include <irssi/src/core/chat-protocols.h>

#include <irssi/src/core/settings.h>
#include <irssi/src/irc/core/irc-servers.h>
#include <irssi/src/irc/core/irc-chatnets.h>
#include <irssi/src/irc/core/irc-channels.h>
#include <irssi/src/irc/core/irc-queries.h>
#include <irssi/src/irc/core/irc-cap.h>
#include <irssi/src/irc/core/sasl.h>

#include <irssi/src/irc/core/irc-servers-setup.h>
#include <irssi/src/core/channels-setup.h>

#include <irssi/src/irc/core/ctcp.h>
#include <irssi/src/irc/core/irc-commands.h>
#include <irssi/src/irc/core/netsplit.h>

/* submodule init/deinit hooks implemented elsewhere in this module */
void irc_expandos_init(void);
void irc_expandos_deinit(void);

void irc_session_init(void);
void irc_session_deinit(void);

void lag_init(void);
void lag_deinit(void);

/* Allocator callbacks handed to the chat-protocol layer; each returns a
 * zeroed record of the IRC-specific subtype. */
static CHATNET_REC *create_chatnet(void)
{
	return g_malloc0(sizeof(IRC_CHATNET_REC));
}

static SERVER_SETUP_REC *create_server_setup(void)
{
	return g_malloc0(sizeof(IRC_SERVER_SETUP_REC));
}

static CHANNEL_SETUP_REC *create_channel_setup(void)
{
	return g_malloc0(sizeof(CHANNEL_SETUP_REC));
}

static SERVER_CONNECT_REC *create_server_connect(void)
{
	return g_malloc0(sizeof(IRC_SERVER_CONNECT_REC));
}

/* Free the IRC-specific fields of a connect record; the record itself is
 * released by the caller.  No-op for records that are not IRC connects.
 * NOTE(review): only usermode and alternate_nick are released here —
 * verify against IRC_SERVER_CONNECT_REC that no other owned string
 * fields (e.g. SASL credentials, if stored on this record) need freeing. */
static void destroy_server_connect(SERVER_CONNECT_REC *conn)
{
	IRC_SERVER_CONNECT_REC *ircconn;

	ircconn = IRC_SERVER_CONNECT(conn);
	if (ircconn == NULL)
		return;

	g_free_not_null(ircconn->usermode);
	g_free_not_null(ircconn->alternate_nick);
}

/* Register the IRC protocol with irssi's chat-protocol layer and bring up
 * every IRC-core subsystem.  The init order below is significant: later
 * subsystems hook signals emitted by earlier ones. */
void irc_core_init(void)
{
	CHAT_PROTOCOL_REC *rec;

	rec = g_new0(CHAT_PROTOCOL_REC, 1);
	rec->name = "IRC";
	rec->fullname = "Internet Relay Chat";
	rec->chatnet = "ircnet";

	rec->case_insensitive = TRUE;

	rec->create_chatnet = create_chatnet;
	rec->create_server_setup = create_server_setup;
	rec->create_channel_setup = create_channel_setup;
	rec->create_server_connect = create_server_connect;
	rec->destroy_server_connect = destroy_server_connect;

	rec->server_init_connect = irc_server_init_connect;
	rec->server_connect = irc_server_connect;
	rec->channel_create = (CHANNEL_REC *(*) (SERVER_REC *, const char *, const char *, int)) irc_channel_create;
	rec->query_create = (QUERY_REC *(*) (const char *, const char *, int)) irc_query_create;

	chat_protocol_register(rec);
	g_free(rec); /* chat_protocol_register copies what it needs */

	irc_session_init();
	irc_chatnets_init();
	irc_servers_init();
	irc_channels_init();
	irc_queries_init();

	ctcp_init();
	irc_commands_init();
	irc_irc_init();
	lag_init();
	netsplit_init();
	irc_expandos_init();
	irc_cap_init();
	sasl_init();

	settings_check();
	module_register("core", "irc");
}

/* Tear down the IRC core in reverse initialization order and unregister
 * the protocol.  The "chat protocol deinit" signal is emitted first so
 * listeners can clean up while the protocol record still exists. */
void irc_core_deinit(void)
{
	signal_emit("chat protocol deinit", 1, chat_protocol_find("IRC"));

	sasl_deinit();
	irc_cap_deinit();
	irc_expandos_deinit();
	netsplit_deinit();
	lag_deinit();
	irc_irc_deinit();
	irc_commands_deinit();
	ctcp_deinit();

	irc_queries_deinit();
	irc_channels_deinit();
	irc_servers_deinit();
	irc_chatnets_deinit();
	irc_session_deinit();

	chat_protocol_unregister("IRC");
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_903_0
crossvul-cpp_data_bad_180_0
/* radare - LGPL - Copyright 2010-2016 - pancake, nibble */

#include <r_anal.h>
#include <r_util.h>
#include <r_list.h>
#include <limits.h>

/* Number of per-instruction offsets preallocated in a new block. */
#define DFLT_NINSTR 3

/* Allocate and zero-initialize a basic block.
 * Addresses start out as UT64_MAX ("unset"); op_pos gets a small
 * offset table that r_anal_bb_set_offset() grows on demand.
 * Returns NULL on allocation failure. */
R_API RAnalBlock *r_anal_bb_new() {
	RAnalBlock *bb = R_NEW0 (RAnalBlock);
	if (!bb) {
		return NULL;
	}
	bb->addr = UT64_MAX;
	bb->jump = UT64_MAX;
	bb->fail = UT64_MAX;
	bb->switch_op = NULL;
	bb->type = R_ANAL_BB_TYPE_NULL;
	bb->cond = NULL;
	bb->fingerprint = NULL;
	bb->diff = NULL; //r_anal_diff_new ();
	bb->label = NULL;
	bb->op_pos = R_NEWS0 (ut16, DFLT_NINSTR);
	bb->op_pos_size = DFLT_NINSTR;
	bb->parent_reg_arena = NULL;
	bb->stackptr = 0;
	bb->parent_stackptr = INT_MAX;
	return bb;
}

/* Free a basic block and everything it owns, and unlink it from its
 * prev/jumpbb/failbb neighbours so they are not left with dangling
 * pointers to the freed block. */
R_API void r_anal_bb_free(RAnalBlock *bb) {
	if (!bb) {
		return;
	}
	r_anal_cond_free (bb->cond);
	R_FREE (bb->fingerprint);
	r_anal_diff_free (bb->diff);
	bb->diff = NULL;
	R_FREE (bb->op_bytes);
	r_anal_switch_op_free (bb->switch_op);
	bb->switch_op = NULL;
	bb->fingerprint = NULL;
	bb->cond = NULL;
	R_FREE (bb->label);
	R_FREE (bb->op_pos);
	R_FREE (bb->parent_reg_arena);
	/* Detach from the neighbour links so no other block keeps a
	 * pointer into this soon-to-be-freed memory. */
	if (bb->prev) {
		if (bb->prev->jumpbb == bb) {
			bb->prev->jumpbb = NULL;
		}
		if (bb->prev->failbb == bb) {
			bb->prev->failbb = NULL;
		}
		bb->prev = NULL;
	}
	if (bb->jumpbb) {
		bb->jumpbb->prev = NULL;
		bb->jumpbb = NULL;
	}
	if (bb->failbb) {
		bb->failbb->prev = NULL;
		bb->failbb = NULL;
	}
	R_FREE (bb);
}

/* Create a list whose destructor frees the contained basic blocks. */
R_API RList *r_anal_bb_list_new() {
	RList *list = r_list_newf ((RListFree)r_anal_bb_free);
	if (!list) {
		return NULL;
	}
	return list;
}

/* Disassemble bytes from `buf` at `addr` into block `bb`, stopping at a
 * control-flow terminator (jump/cjmp/ret/...) or when the buffer runs out.
 * Returns bb->size when the scan falls off the end of the buffer,
 * R_ANAL_RET_END when a terminator closed the block (or the first opcode
 * could not be decoded), R_ANAL_RET_ERROR on allocation failure. */
R_API int r_anal_bb(RAnal *anal, RAnalBlock *bb, ut64 addr, ut8 *buf, ut64 len, int head) {
	RAnalOp *op = NULL;
	int oplen, idx = 0;

	if (bb->addr == -1) {
		bb->addr = addr;
	}
	/* FIX: `len` is unsigned (ut64). The unconditional `len -= 16`
	 * below underflowed to a huge value whenever len < 16, which made
	 * the scan loop read far past the end of `buf`. Bail out early
	 * for buffers too short to scan at all. */
	if (len < 16) {
		return R_ANAL_RET_END;
	}
	len -= 16; // XXX: hack to avoid segfault by x86im
	while (idx < len) {
		// TODO: too slow object construction
		if (!(op = r_anal_op_new ())) {
			eprintf ("Error: new (op)\n");
			return R_ANAL_RET_ERROR;
		}
		if ((oplen = r_anal_op (anal, op, addr + idx, buf + idx, len - idx, R_ANAL_OP_MASK_VAL)) == 0) {
			r_anal_op_free (op);
			op = NULL;
			if (idx == 0) {
				VERBOSE_ANAL eprintf ("Unknown opcode at 0x%08"PFMT64x"\n", addr+idx);
				return R_ANAL_RET_END;
			}
			break;
		}
		if (oplen < 1) {
			goto beach;
		}
		r_anal_bb_set_offset (bb, bb->ninstr++, addr + idx - bb->addr);
		idx += oplen;
		bb->size += oplen;
		if (head) {
			bb->type = R_ANAL_BB_TYPE_HEAD;
		}
		switch (op->type) {
		case R_ANAL_OP_TYPE_CMP:
			r_anal_cond_free (bb->cond);
			bb->cond = r_anal_cond_new_from_op (op);
			break;
		case R_ANAL_OP_TYPE_CJMP:
			if (bb->cond) {
				// TODO: get values from anal backend
				bb->cond->type = R_ANAL_COND_EQ;
			} else VERBOSE_ANAL eprintf ("Unknown conditional for block 0x%"PFMT64x"\n", bb->addr);
			bb->conditional = 1;
			bb->fail = op->fail;
			bb->jump = op->jump;
			bb->type |= R_ANAL_BB_TYPE_BODY;
			goto beach;
		case R_ANAL_OP_TYPE_JMP:
			bb->jump = op->jump;
			bb->type |= R_ANAL_BB_TYPE_BODY;
			goto beach;
		case R_ANAL_OP_TYPE_UJMP:
		case R_ANAL_OP_TYPE_IJMP:
		case R_ANAL_OP_TYPE_RJMP:
		case R_ANAL_OP_TYPE_IRJMP:
			bb->type |= R_ANAL_BB_TYPE_FOOT;
			goto beach;
		case R_ANAL_OP_TYPE_RET:
			bb->type |= R_ANAL_BB_TYPE_LAST;
			goto beach;
		case R_ANAL_OP_TYPE_LEA:
			{
			/* pc-relative LEA: record a data xref to the
			 * computed target address. */
			RAnalValue *src = op->src[0];
			if (src && src->reg && anal->reg) {
				const char *pc = anal->reg->name[R_REG_NAME_PC];
				RAnalValue *dst = op->dst;
				if (dst && dst->reg && !strcmp (src->reg->name, pc)) {
					int memref = anal->bits/8;
					ut8 b[8];
					ut64 ptr = idx+addr+src->delta;
					anal->iob.read_at (anal->iob.io, ptr, b, memref);
					r_anal_xrefs_set (anal, addr+idx-op->size, ptr, R_ANAL_REF_TYPE_DATA);
				}
			}
			}
		}
		r_anal_op_free (op);
	}
	return bb->size;
beach:
	r_anal_op_free (op);
	return R_ANAL_RET_END;
}

/* True if `off` lies inside the block's [addr, addr+size) range. */
R_API inline int r_anal_bb_is_in_offset (RAnalBlock *bb, ut64 off) {
	return (off >= bb->addr && off < bb->addr + bb->size);
}

/* Linear search over all functions' blocks for the one containing `off`.
 * Returns NULL if no block covers the offset. */
R_API RAnalBlock *r_anal_bb_from_offset(RAnal *anal, ut64 off) {
	RListIter *iter, *iter2;
	RAnalFunction *fcn;
	RAnalBlock *bb;
	r_list_foreach (anal->fcns, iter, fcn) {
		r_list_foreach (fcn->bbs, iter2, bb) {
			if (r_anal_bb_is_in_offset (bb, off)) {
				return bb;
			}
		}
	}
	return NULL;
}

/* Resolve (and cache in bb->jumpbb) the block at bb->jump within `fcn`.
 * Also links the target's ->prev back to `bb`. Returns NULL when the
 * block has no jump target or it is not found in the function. */
R_API RAnalBlock *r_anal_bb_get_jumpbb(RAnalFunction *fcn, RAnalBlock *bb) {
	if (bb->jump == UT64_MAX) {
		return NULL;
	}
	if (bb->jumpbb) {
		return bb->jumpbb;
	}
	RListIter *iter;
	RAnalBlock *b;
	r_list_foreach (fcn->bbs, iter, b) {
		if (b->addr == bb->jump) {
			bb->jumpbb = b;
			b->prev = bb;
			return b;
		}
	}
	return NULL;
}

/* Same as r_anal_bb_get_jumpbb() but for the fall-through (fail) edge. */
R_API RAnalBlock *r_anal_bb_get_failbb(RAnalFunction *fcn, RAnalBlock *bb) {
	RListIter *iter;
	RAnalBlock *b;
	if (bb->fail == UT64_MAX) {
		return NULL;
	}
	if (bb->failbb) {
		return bb->failbb;
	}
	r_list_foreach (fcn->bbs, iter, b) {
		if (b->addr == bb->fail) {
			bb->failbb = b;
			b->prev = bb;
			return b;
		}
	}
	return NULL;
}

/* return the offset of the i-th instruction in the basicblock bb.
 * If the index of the instruction is not valid, it returns UT16_MAX */
R_API ut16 r_anal_bb_offset_inst(RAnalBlock *bb, int i) {
	if (i < 0 || i >= bb->ninstr) {
		return UT16_MAX;
	}
	/* offset 0 of instruction 0 is implicit (not stored in op_pos) */
	return (i > 0 && (i - 1) < bb->op_pos_size) ? bb->op_pos[i - 1] : 0;
}

/* set the offset of the i-th instruction in the basicblock bb */
R_API bool r_anal_bb_set_offset(RAnalBlock *bb, int i, ut16 v) {
	// the offset 0 of the instruction 0 is not stored because always 0
	if (i > 0 && v > 0) {
		if (i >= bb->op_pos_size) {
			int new_pos_size = i * 2;
			ut16 *tmp_op_pos = realloc (bb->op_pos, new_pos_size * sizeof (*bb->op_pos));
			if (!tmp_op_pos) {
				return false;
			}
			bb->op_pos_size = new_pos_size;
			bb->op_pos = tmp_op_pos;
		}
		bb->op_pos[i - 1] = v;
		return true;
	}
	return true;
}

/* return the address of the instruction that occupy a given offset.
 * If the offset is not part of the given basicblock, UT64_MAX is returned. */
R_API ut64 r_anal_bb_opaddr_at(RAnalBlock *bb, ut64 off) {
	ut16 delta, delta_off, last_delta;
	int i;

	if (!r_anal_bb_is_in_offset (bb, off)) {
		return UT64_MAX;
	}
	last_delta = 0;
	delta_off = off - bb->addr;
	for (i = 0; i < bb->ninstr; i++) {
		delta = r_anal_bb_offset_inst (bb, i);
		if (delta > delta_off) {
			return bb->addr + last_delta;
		}
		last_delta = delta;
	}
	return UT64_MAX;
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_180_0
crossvul-cpp_data_bad_2838_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. 
Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/hash.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/sched/signal.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/busy_poll.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal helper functions. 
 */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type);

/* Module-wide memory accounting state for SCTP sockets. */
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

/* Set the module-wide memory-pressure flag. Presumably installed as the
 * proto ->enter_memory_pressure callback — confirm at the struct proto
 * definition (not visible in this excerpt). */
static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}


/* Get the sndbuf space available at the time on the association.
 * Returns the free send-buffer byte count (never negative).
 */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	/* sndbuf_policy selects per-association vs. per-socket accounting. */
	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association. The reference taken
	 * here is dropped by the sctp_wfree destructor — TODO confirm in
	 * sctp_wfree (defined later in this file). */
	sctp_association_hold(asoc);
	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later. */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
			     sizeof(struct sk_buff) +
			     sizeof(struct sctp_chunk);

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}

/* Verify that this is a valid address.
 * Returns 0 on success, -EINVAL when the family is unknown, the length is
 * too small, or the PF/AF-specific validators reject the address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address? */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.
* If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	/* Reject associations owned by other sockets or already dying. */
	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	/* Convert the stored address back to the user-visible format
	 * before returning the transport to the caller. */
	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);

/* Verify this is a valid sockaddr.
 * Returns the address-family descriptor for a supported, sufficiently
 * sized sockaddr, or NULL on any failure. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.
	 * NOTE(review): `len` is signed and compared against size_t sizeof;
	 * a negative len would convert to a huge unsigned value and pass
	 * this check — presumably callers never pass negative lengths, but
	 * confirm. */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
		if (!opt->pf->af_supported(AF_INET, opt))
			return NULL;
	} else {
		/* Does this PF support this AF? */
		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inhert an already bound port in this case
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	/* Privileged ports require CAP_NET_BIND_SERVICE. */
	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port. */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, SCTP_ADDR_SRC,
				 GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	/* ASCONF is only sent when the ADDIP extension is enabled. */
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			/* Reset congestion state on every transport so the
			 * new source addresses start from a fresh path. */
			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				sctp_transport_dst_release(trans);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that has been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			/* Deleting the very last address: remember it so it
			 * can be removed once a replacement is established
			 * (src_out_of_asoc_ok path). */
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc,
				 &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			sctp_transport_dst_release(transport);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
* * For SCTP, the port given in each socket address must be the same, or * sctp_bindx() will fail, setting errno to EINVAL. * * The flags parameter is formed from the bitwise OR of zero or more of * the following currently defined flags: * * SCTP_BINDX_ADD_ADDR * * SCTP_BINDX_REM_ADDR * * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given * addresses from the association. The two flags are mutually exclusive; * if both are given, sctp_bindx() will fail with EINVAL. A caller may * not remove all addresses from an association; sctp_bindx() will * reject such an attempt with EINVAL. * * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate * additional addresses with an endpoint after calling bind(). Or use * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening * socket is associated with so that no new association accepted will be * associated with those addresses. If the endpoint supports dynamic * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a * endpoint to send the appropriate message to the peer to change the * peers address lists. * * Adding and removing addresses from a connected association is * optional functionality. Implementations that do not support this * functionality should return EOPNOTSUPP. * * Basically do nothing but copying the addresses from user to kernel * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() * from userspace. * * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). 
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	/* addrs_size is a user-supplied int; only positivity is checked
	 * here, so the allocation below can be asked for up to INT_MAX
	 * bytes.  kmalloc() will simply fail (with __GFP_NOWARN) for
	 * oversized requests.  NOTE(review): consider bounding this
	 * explicitly — confirm against current upstream.
	 */
	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses.
	 * Each entry must carry at least a sa_family_t and its full
	 * per-family sockaddr length must fit inside the buffer.
	 */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}

/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 *
 * Walks the packed address array, validates every address, creates the
 * association (and transports) on first use, then kicks off ASSOCIATE
 * and optionally blocks waiting for the connection.  Caller must NOT
 * hold the socket lock; this function does not take it either — locking
 * is handled by the setsockopt/connect entry points above this layer.
 * NOTE(review): confirm the locking contract against the callers, which
 * are outside this view.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	enum sctp_scope scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		/* Entry header (family) must fit in the remaining buffer. */
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0) {
				goto out_free;
			}

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0) {
		goto out_free;
	}

	/* Initialize sk's dport and daddr for getpeername().
	 * sa_addr still points at the last address walked above.
	 */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);
	sk->sk_err = 0;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;

	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * To the hash table, try to unhash it, just in case, its a noop
		 * if it wasn't hashed so we're safe
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 * 			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e.
struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.
	 * Only user-facing sockets (those with a struct file) get the
	 * GFP_USER | __GFP_NOWARN treatment; in-kernel sockets keep
	 * plain GFP_KERNEL.
	 */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		/* __sctp_connect() validates each packed address itself. */
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	/* Same as the new interface, but no association id is reported. */
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  The since the API is done with a socket
 * option, to make it simple we feed back the association id is as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	/* On success the (positive) association id doubles as the
	 * return value; errors pass through negative.
	 */
	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that use-space library
 * can avoid any unnecessary allocations.
The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct sctp_getaddrs_old: the user pointer is a
 * compat_uptr_t instead of a native pointer.
 */
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

/* connectx() tunneled through getsockopt(): reads a (possibly compat)
 * sctp_getaddrs_old from optval, performs the connect, and writes the
 * resulting association id back to optval on success or -EINPROGRESS.
 */
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		/* Widen the compat layout into the native one. */
		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		/* Report the association id even for an in-progress
		 * non-blocking connect.
		 */
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 * };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		/* Unread data, pending reassembly/ordering data, or a
		 * zero-time SO_LINGER all force an immediate ABORT;
		 * otherwise shut down gracefully.
		 */
		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock_nested(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error.
 * Maps -EPIPE to any pending socket error, and raises SIGPIPE unless
 * the caller passed MSG_NOSIGNAL.
 */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *msg,
			     struct sctp_cmsgs *cmsgs);

/* sendmsg() entry point for SCTP sockets.
 * Parses cmsgs (SCTP_INIT/SNDINFO/SNDRCVINFO), locates or implicitly
 * creates the association, applies SCTP_EOF/SCTP_ABORT semantics,
 * fragments the message into chunks, and hands them to the outqueue.
 * Returns msg_len on success or a negative errno.
 */
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	struct sctp_cmsgs cmsgs = { NULL };
	enum sctp_scope scope;
	bool fill_sinfo_ttl = false, wait_connect = false;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;
	__u16 sinfo_flags = 0;
	long timeo;
	int err;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;	/* socket lock not held yet */

		/* Clamp the copy so an oversized msg_namelen cannot
		 * overflow the on-stack 'to'.
		 */
		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinit = cmsgs.init;
	if (cmsgs.sinfo != NULL) {
		/* SCTP_SNDINFO given: translate it into the older
		 * sndrcvinfo layout used internally.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
		default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
		default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
		default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
		default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;

		sinfo = &default_sinfo;
		fill_sinfo_ttl = true;
	} else {
		sinfo = cmsgs.srinfo;
	}
	/* Did the user specify SNDINFO/SNDRCVINFO? */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);

		/* If we could not find a matching association on the
		 * endpoint, make sure that it is not a TCP-style
		 * socket that already has an association or there is
		 * no peeled-off association on another socket.
		 */
		if (!asoc &&
		    ((sctp_style(sk, TCP) &&
		      (sctp_sstate(sk, ESTABLISHED) ||
		       sctp_sstate(sk, CLOSING))) ||
		     sctp_endpoint_is_peeled_off(ep, &to))) {
			err = -EADDRNOTAVAIL;
			goto out_unlock;
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < inet_prot_sock(net) &&
			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;
		err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				asoc->c.sinit_num_ostreams =
					sinit->sinit_num_ostreams;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	pr_debug("%s: we have a valid association\n", __func__);

	if (!sinfo) {
		/* If the user didn't specify SNDINFO/SNDRCVINFO, make up
		 * one with some defaults.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);

		sinfo = &default_sinfo;
	} else if (fill_sinfo_ttl) {
		/* In case SNDINFO was specified, we still need to fill
		 * it with a default ttl from the assoc here.
		 */
		sinfo->sinfo_timetolive = asoc->default_timetolive;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
		err = -EINVAL;
		goto out_free;
	}

	/* PR-SCTP: make room by abandoning lower-priority messages
	 * before (possibly) blocking on sndbuf space.
	 */
	if (sctp_wspace(asoc) < msg_len)
		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto out_free;
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
		if (err < 0)
			goto out_free;

		wait_connect = true;
		pr_debug("%s: we associated primitively\n", __func__);
	}

	/* Break the message into multiple chunks of maximum size. */
	datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
	if (IS_ERR(datamsg)) {
		err = PTR_ERR(datamsg);
		goto out_free;
	}

	asoc->force_delay = !!(msg->msg_flags & MSG_MORE);

	/* Now send the (possibly) fragmented message. */
	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);

		/* Do accounting for the write space.  */
		sctp_set_owner_w(chunk);

		chunk->transport = chunk_tp;
	}

	/* Send it to the lower layers.  Note:  all chunks
	 * must either fail or succeed.   The lower layer
	 * works that way today.  Keep it that way or this
	 * breaks.
	 */
	err = sctp_primitive_SEND(net, asoc, datamsg);
	/* Did the lower layer accept the chunk? */
	if (err) {
		sctp_datamsg_free(datamsg);
		goto out_free;
	}

	pr_debug("%s: we sent primitively\n", __func__);

	sctp_datamsg_put(datamsg);
	err = msg_len;

	if (unlikely(wait_connect)) {
		timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
		sctp_wait_for_connect(asoc, &timeo);
	}

	/* If we are already past ASSOCIATE, the lower
	 * layers are responsible for association cleanup.
	 */
	goto out_unlock;

out_free:
	/* Only tear down an association we created in this call. */
	if (new_asoc)
		sctp_association_free(asoc);
out_unlock:
	release_sock(sk);

out_nounlock:
	return sctp_error(sk, msg_flags, err);

#if 0
do_sock_err:
	if (msg_len)
		err = msg_len;
	else
		err = sock_error(sk);
	goto out;

do_interrupted:
	if (msg_len)
		err = msg_len;
	goto out;
#endif /* 0 */
}

/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * when 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	/* Fast path: everything fits in this skb's linear head. */
	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	/* Recurse over the frag_list, adjusting the parent's byte
	 * accounting by however much each fragment gave up.
	 */
	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}

/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock, int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb, *head_skb;
	int copied;
	int err = 0;
	int skb_len;

	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
		 addr_len);

	lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	/* Timestamps and the source address come from the head skb of
	 * the chunk when one is recorded, else from this skb itself.
	 */
	if (event->chunk && event->chunk->head_skb)
		head_skb = event->chunk->head_skb;
	else
		head_skb = skb;
	sock_recv_ts_and_drops(msg, sk, head_skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_NXTINFO. */
	if (sp->recvnxtinfo)
		sctp_ulpevent_read_nxtinfo(event, msg, sk);
	/* Check if we allow SCTP_RCVINFO. */
	if (sp->recvrcvinfo)
		sctp_ulpevent_read_rcvinfo(event, msg);
	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data. Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is a on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead a error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Any non-zero value enables the flag. */
	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}

/* SCTP_EVENTS: update the socket's event-subscription mask.
 * Note: only optlen bytes of the subscribe structure are overwritten;
 * if userspace passes a shorter (older) structure, the trailing fields
 * keep their previous values.
 */
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_ulpevent *event;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
				       &sctp_sk(sk)->subscribe)) {
		asoc = sctp_id2assoc(sk, 0);

		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_ATOMIC);
			if (!event)
				return -ENOMEM;

			sctp_ulpq_tail_event(&asoc->ulpq, event);
		}
	}

	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.
 The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	/* Clamp to the per-netns upper bound (net.sctp.max_autoclose). */
	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 * Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.
 The time specified in spp_sackdelay
 * is used to specify the sack delay for this address.  Note
 * that if spp_address is empty then all addresses will
 * enable delayed sack and take on the sack delay
 * value specified in spp_sackdelay.
 * SPP_SACKDELAY_DISABLE - Setting this flag turns
 * off delayed sack.  If the spp_address field is blank then
 * delayed sack is disabled for the entire association.  Note
 * also that this field is mutually exclusive to
 * SPP_SACKDELAY_ENABLE, setting both will have undefined
 * results.
 */
/* Apply one sctp_paddrparams request at the narrowest scope supplied:
 * a single transport (trans), a whole association (asoc), or the socket
 * defaults (sp).  Each parameter falls through trans -> asoc -> sp.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport   *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock        *sp,
				       int                      hb_change,
				       int                      pmtud_change,
				       int                      sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				/* Socket-level default stays in milliseconds;
				 * only trans/asoc values are in jiffies.
				 */
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			/* NOTE(review): asoc may be NULL here when only a
			 * transport was matched -- verify that
			 * sctp_assoc_sync_pmtu() copes with that (or should
			 * be given trans->asoc instead).
			 */
			sctp_assoc_sync_pmtu(asoc);
		} else if (asoc) {
			asoc->pathmtu = params->spp_pathmtu;
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			/* Re-probe the path MTU only on a DISABLE->ENABLE
			 * transition.
			 */
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans, sctp_opt2sk(sp));
				sctp_assoc_sync_pmtu(asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) &&
	    params->spp_sackdelay) {
		if (trans) {
			trans->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else {
			/* Socket-level default stays in milliseconds. */
			sp->sackdelay = params->spp_sackdelay;
		}
	}

	if (sackdelay_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		}
	}

	/* Note that a value of zero indicates the current setting should
	 * be left unchanged.
	 */
	if (params->spp_pathmaxrxt) {
		if (trans) {
			trans->pathmaxrxt = params->spp_pathmaxrxt;
		} else if (asoc) {
			asoc->pathmaxrxt = params->spp_pathmaxrxt;
		} else {
			sp->pathmaxrxt = params->spp_pathmaxrxt;
		}
	}

	return 0;
}

static int sctp_setsockopt_peer_addr_params(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_paddrparams params;
	struct sctp_transport *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	int error;
	int hb_change, pmtud_change, sackdelay_change;

	if (optlen != sizeof(struct sctp_paddrparams))
		return -EINVAL;

	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	/* Validate flags and value parameters.  Setting both the ENABLE and
	 * DISABLE bit of any pair at once is rejected, as is a sack delay
	 * above 500ms or a fixed path MTU below the protocol minimum.
	 */
	hb_change        = params.spp_flags & SPP_HB;
	pmtud_change     = params.spp_flags & SPP_PMTUD;
	sackdelay_change = params.spp_flags & SPP_SACKDELAY;

	if (hb_change        == SPP_HB ||
	    pmtud_change     == SPP_PMTUD ||
	    sackdelay_change == SPP_SACKDELAY ||
	    params.spp_sackdelay > 500 ||
	    (params.spp_pathmtu &&
	     params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
		return -EINVAL;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans)
			return -EINVAL;
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Heartbeat demand can only be sent on a transport or
	 * association, but not a socket.
	 */
	if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
		return -EINVAL;

	/* Process parameters. */
	error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
					    hb_change, pmtud_change,
					    sackdelay_change);

	if (error)
		return error;

	/* If changes are for association, also apply parameters to each
	 * transport.
	 */
	if (!trans && asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				transports) {
			sctp_apply_peer_addr_params(&params, trans, asoc, sp,
						    hb_change, pmtud_change,
						    sackdelay_change);
		}
	}

	return 0;
}

/* Set only the SACKDELAY bits of the param flags, preserving the rest. */
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will effect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoints default value is changed (effecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_setsockopt_delayed_ack(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	struct sctp_sack_info    params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (optlen == sizeof(struct sctp_sack_info)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		/* Both values zero: nothing to change. */
		if (params.sack_delay == 0 && params.sack_freq == 0)
			return 0;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		/* NOTE(review): this relies on sctp_assoc_value's layout
		 * aliasing the first two sctp_sack_info members
		 * (assoc_id -> sack_assoc_id, assoc_value -> sack_delay);
		 * sack_freq is then synthesized below.  Verify the layouts
		 * stay in sync if either struct changes.
		 */
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0)
			params.sack_freq = 1;
		else
			params.sack_freq = 0;
	} else
		return -EINVAL;

	/* Validate value parameter. */
	if (params.sack_delay > 500)
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (params.sack_delay) {
		if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params.sack_delay);
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackdelay = params.sack_delay;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	if (params.sack_freq == 1) {
		/* Frequency 1 disables the delayed sack algorithm. */
		if (asoc) {
			asoc->param_flags =
				sctp_spp_sackdelay_disable(asoc->param_flags);
		} else {
			sp->param_flags =
				sctp_spp_sackdelay_disable(sp->param_flags);
		}
	} else if (params.sack_freq > 1) {
		if (asoc) {
			asoc->sackfreq = params.sack_freq;
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackfreq = params.sack_freq;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	/* If change is for association, also apply to each transport.
	 */
	if (asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				transports) {
			if (params.sack_delay) {
				trans->sackdelay =
					msecs_to_jiffies(params.sack_delay);
				trans->param_flags =
					sctp_spp_sackdelay_enable(trans->param_flags);
			}
			if (params.sack_freq == 1) {
				trans->param_flags =
					sctp_spp_sackdelay_disable(trans->param_flags);
			} else if (params.sack_freq > 1) {
				trans->sackfreq = params.sack_freq;
				trans->param_flags =
					sctp_spp_sackdelay_enable(trans->param_flags);
			}
		}
	}

	return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are effected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	/* A zero in any field leaves the corresponding default untouched. */
	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}

/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data.  This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2) The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 * to this call if the caller is using the UDP model.
 */
static int sctp_setsockopt_default_send_param(struct sock *sk,
					      char __user *optval,
					      unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	/* Only the send-flag bits below are valid defaults. */
	if (info.sinfo_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		asoc->default_stream = info.sinfo_stream;
		asoc->default_flags = info.sinfo_flags;
		asoc->default_ppid = info.sinfo_ppid;
		asoc->default_context = info.sinfo_context;
		asoc->default_timetolive = info.sinfo_timetolive;
	} else {
		/* No association matched: set the socket-wide defaults. */
		sp->default_stream = info.sinfo_stream;
		sp->default_flags = info.sinfo_flags;
		sp->default_ppid = info.sinfo_ppid;
		sp->default_context = info.sinfo_context;
		sp->default_timetolive = info.sinfo_timetolive;
	}

	return 0;
}

/* RFC6458, Section 8.1.31.
 Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_setsockopt_default_sndinfo(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	/* Only the send-flag bits below are valid defaults. */
	if (info.snd_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		asoc->default_stream = info.snd_sid;
		asoc->default_flags = info.snd_flags;
		asoc->default_ppid = info.snd_ppid;
		asoc->default_context = info.snd_context;
	} else {
		/* No association matched: set the socket-wide defaults. */
		sp->default_stream = info.snd_sid;
		sp->default_flags = info.snd_flags;
		sp->default_ppid = info.snd_ppid;
		sp->default_context = info.snd_context;
	}

	return 0;
}

/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
					unsigned int optlen)
{
	struct sctp_prim prim;
	struct sctp_transport *trans;

	if (optlen != sizeof(struct sctp_prim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
		return -EFAULT;

	/* The address must resolve to an existing peer transport. */
	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
	if (!trans)
		return -EINVAL;

	sctp_assoc_set_primary(trans->asoc, trans);

	return 0;
}

/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Any non-zero user value enables the flag. */
	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}

/*
 *
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable.  sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 *
 */
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;
	unsigned long rto_min, rto_max;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof (struct sctp_rtoinfo))
		return -EINVAL;

	if (copy_from_user(&rtoinfo, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	/* Set the values to the specific association */
	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Units differ by target: association values are kept in jiffies,
	 * socket-level defaults in milliseconds.  A zero user value means
	 * "keep the current setting", so substitute it before comparing.
	 */
	rto_max = rtoinfo.srto_max;
	rto_min = rtoinfo.srto_min;

	if (rto_max)
		rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
	else
		rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;

	if (rto_min)
		rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
	else
		rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;

	/* The effective minimum must never exceed the effective maximum. */
	if (rto_min > rto_max)
		return -EINVAL;

	if (asoc) {
		if (rtoinfo.srto_initial != 0)
			asoc->rto_initial =
				msecs_to_jiffies(rtoinfo.srto_initial);
		asoc->rto_max = rto_max;
		asoc->rto_min = rto_min;
	} else {
		/* If there is no association or the association-id = 0
		 * set the values to the endpoint.
		 */
		if (rtoinfo.srto_initial != 0)
			sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
		sp->rtoinfo.srto_max = rto_max;
		sp->rtoinfo.srto_min = rto_min;
	}

	return 0;
}

/*
 *
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value  of the peer.
 * See [SCTP] for more information.
 *
 */
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{

	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assocparams))
		return -EINVAL;
	if (copy_from_user(&assocparams, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Set the values to the specific association */
	if (asoc) {
		if (assocparams.sasoc_asocmaxrxt != 0) {
			__u32 path_sum = 0;
			int   paths = 0;
			struct sctp_transport *peer_addr;

			list_for_each_entry(peer_addr,
					&asoc->peer.transport_addr_list,
					transports) {
				path_sum += peer_addr->pathmaxrxt;
				paths++;
			}

			/* Only validate asocmaxrxt if we have more than
			 * one path/transport.  We do this because path
			 * retransmissions are only counted when we have more
			 * then one path.
			 */
			if (paths > 1 &&
			    assocparams.sasoc_asocmaxrxt > path_sum)
				return -EINVAL;

			asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
		}

		if (assocparams.sasoc_cookie_life != 0)
			asoc->cookie_life =
				ms_to_ktime(assocparams.sasoc_cookie_life);
	} else {
		/* Set the values to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		if (assocparams.sasoc_asocmaxrxt != 0)
			sp->assocparams.sasoc_asocmaxrxt =
				assocparams.sasoc_asocmaxrxt;
		if (assocparams.sasoc_cookie_life != 0)
			sp->assocparams.sasoc_cookie_life =
				assocparams.sasoc_cookie_life;
	}
	return 0;
}

/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval,
				    unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (val)
		sp->v4mapped = 1;
	else
		sp->v4mapped = 0;

	return 0;
}

/*
 * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.  The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will effect
 * SCTP's choice of DATA chunk size.
 Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *   sctp_assoc_t assoc_id;
 *   uint32_t assoc_value;
 * };
 *
 * assoc_id:  This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon.  Note that if
 *    this field's value is zero then the endpoints default value is
 *    changed (effecting future associations only).
 * assoc_value:  This parameter specifies the maximum size in bytes.
 */
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);
	int val;

	if (optlen == sizeof(int)) {
		/* Legacy form: a bare int applies to the endpoint. */
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in maxseg socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&val, optval, optlen))
			return -EFAULT;
		params.assoc_id = 0;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		val = params.assoc_value;
	} else
		return -EINVAL;

	/* 0 means "no user limit"; otherwise bound to sane chunk sizes. */
	if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		if (val == 0) {
			/* Derive the usable payload from the path MTU by
			 * subtracting network, SCTP and DATA chunk headers.
			 */
			val = asoc->pathmtu;
			val -= sp->pf->af->net_header_len;
			val -= sizeof(struct sctphdr) +
					sizeof(struct sctp_data_chunk);
		}
		asoc->user_frag = val;
		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
	} else {
		sp->user_frag = val;
	}

	return 0;
}

/*
 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
 *
 * Requests that the peer mark the enclosed address as the association
 * primary.
 The enclosed address must be one of the association's
 * locally bound addresses.  The following structure is used to make a
 * set primary request:
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	struct net *net = sock_net(sk);
	struct sctp_sock	*sp;
	struct sctp_association	*asoc = NULL;
	struct sctp_setpeerprim	prim;
	struct sctp_chunk	*chunk;
	struct sctp_af		*af;
	int			err;

	sp = sctp_sk(sk);

	/* ASCONF must be enabled in this netns for this request. */
	if (!net->sctp.addip_enable)
		return -EPERM;

	if (optlen != sizeof(struct sctp_setpeerprim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.asconf_capable)
		return -EPERM;

	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
		return -EPERM;

	if (!sctp_state(asoc, ESTABLISHED))
		return -ENOTCONN;

	af = sctp_get_af_specific(prim.sspp_addr.ss_family);
	if (!af)
		return -EINVAL;

	if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
		return -EADDRNOTAVAIL;

	/* The requested address must be one of our own bound addresses. */
	if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
		return -EADDRNOTAVAIL;

	/* Create an ASCONF chunk with SET_PRIMARY parameter	*/
	chunk = sctp_make_asconf_set_prim(asoc,
					  (union sctp_addr *)&prim.sspp_addr);
	if (!chunk)
		return -ENOMEM;

	err = sctp_send_asconf(asoc, chunk);

	pr_debug("%s: we set peer primary addr primitively\n", __func__);

	return err;
}

/* Set the adaptation-layer indication (SCTP_ADAPTATION_LAYER). */
static int sctp_setsockopt_adaptation_layer(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}

/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call.
This option allows the setting of * a default context on an association basis that will be received on * reading messages from the peer. This is especially helpful in the * one-2-many model for an application to keep some reference to an * internal state machine that is processing messages on the * association. Note that the setting of this value only effects * received messages from the peer and does not effect the value that is * saved with outbound messages. */ static int sctp_setsockopt_context(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assoc_value)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; asoc->default_rcv_context = params.assoc_value; } else { sp->default_rcv_context = params.assoc_value; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * * This options will at a minimum specify if the implementation is doing * fragmented interleave. Fragmented interleave, for a one to many * socket, is when subsequent calls to receive a message may return * parts of messages from different associations. Some implementations * may allow you to turn this value on or off. If so, when turned off, * no fragment interleave will occur (which will cause a head of line * blocking amongst multiple associations sharing the same one to many * socket). When this option is turned on, then each receive call may * come from a different association (thus the user must receive data * with the extended calls (e.g. sctp_recvmsg) to keep track of which * association each receive belongs to. * * This option takes a boolean value. A non-zero value indicates that * fragmented interleave is on. A value of zero indicates that * fragmented interleave is off. 
* * Note that it is important that an implementation that allows this * option to be turned on, have it off by default. Otherwise an unaware * application using the one to many model may become confused and act * incorrectly. */ static int sctp_setsockopt_fragment_interleave(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen != sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; return 0; } /* * 8.1.21. Set or Get the SCTP Partial Delivery Point * (SCTP_PARTIAL_DELIVERY_POINT) * * This option will set or get the SCTP partial delivery point. This * point is the size of a message where the partial delivery API will be * invoked to help free up rwnd space for the peer. Setting this to a * lower value will cause partial deliveries to happen more often. The * calls argument is an integer that sets or gets the partial delivery * point. Note also that the call will fail if the user attempts to set * this value larger than the socket receive buffer size. * * Note that any single message having a length smaller than or equal to * the SCTP partial delivery point will be delivered in one single read * call as long as the user provided buffer is large enough to hold the * message. */ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, char __user *optval, unsigned int optlen) { u32 val; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; /* Note: We double the receive buffer from what the user sets * it to be, also initial rwnd is based on rcvbuf/2. */ if (val > (sk->sk_rcvbuf >> 1)) return -EINVAL; sctp_sk(sk)->pd_point = val; return 0; /* is this the right error code? */ } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * * This option will allow a user to change the maximum burst of packets * that can be emitted by this association. 
 Note that the default value
 * is 4, and some implementations may restrict this setting so that it
 * can only be lowered.
 *
 * NOTE: This text doesn't seem right.  Do this on a socket basis with
 * future associations inheriting the socket value.
 */
static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval,
				    unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;
	int val;
	int assoc_id = 0;

	if (optlen == sizeof(int)) {
		/* Legacy form: a bare int applies to the endpoint. */
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option deprecated.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&val, optval, optlen))
			return -EFAULT;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		val = params.assoc_value;
		assoc_id = params.assoc_id;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (assoc_id != 0) {
		asoc = sctp_id2assoc(sk, assoc_id);
		if (!asoc)
			return -EINVAL;
		asoc->max_burst = val;
	} else
		sp->max_burst = val;

	return 0;
}

/*
 * 7.1.18.  Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
 *
 * This set option adds a chunk type that the user is requesting to be
 * received only in an authenticated way.  Changes to the list of chunks
 * will only effect future associations on the socket.
 */
static int sctp_setsockopt_auth_chunk(struct sock *sk, char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunk val;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authchunk))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	/* These chunk types may never require authentication. */
	switch (val.sauth_chunk) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
	case SCTP_CID_SHUTDOWN_COMPLETE:
	case SCTP_CID_AUTH:
		return -EINVAL;
	}

	/* add this chunk id to the endpoint */
	return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}

/*
 * 7.1.19.
Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) * * This option gets or sets the list of HMAC algorithms that the local * endpoint requires the peer to use. */ static int sctp_setsockopt_hmac_ident(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo *hmacs; u32 idents; int err; if (!ep->auth_enable) return -EACCES; if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; hmacs = memdup_user(optval, optlen); if (IS_ERR(hmacs)) return PTR_ERR(hmacs); idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } err = sctp_auth_ep_set_hmacs(ep, hmacs); out: kfree(hmacs); return err; } /* * 7.1.20. Set a shared key (SCTP_AUTH_KEY) * * This option will set a shared secret key which is used to build an * association shared key. */ static int sctp_setsockopt_auth_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkey *authkey; struct sctp_association *asoc; int ret; if (!ep->auth_enable) return -EACCES; if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; authkey = memdup_user(optval, optlen); if (IS_ERR(authkey)) return PTR_ERR(authkey); if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { ret = -EINVAL; goto out; } asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; goto out; } ret = sctp_auth_set_key(ep, asoc, authkey); out: kzfree(authkey); return ret; } /* * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) * * This option will get or set the active shared key to be used to build * the association shared key. 
*/ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. */ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. 
In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (!sctp_is_ep_boundall(sk) && val) return -EINVAL; if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) return 0; spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); if (val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to alter the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (optlen < sizeof(struct sctp_paddrthlds)) return -EINVAL; if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, sizeof(struct sctp_paddrthlds))) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } if (val.spt_pathmaxrxt) asoc->pathmaxrxt = val.spt_pathmaxrxt; asoc->pf_retrans = val.spt_pathpfthld; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; if 
(val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } return 0; } static int sctp_setsockopt_recvrcvinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_recvnxtinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_pr_supported(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { asoc->prsctp_enable = !!params.assoc_value; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); sp->ep->prsctp_enable = !!params.assoc_value; } else { goto out; } retval = 0; out: return retval; } static int sctp_setsockopt_default_prinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(info)) goto out; if (copy_from_user(&info, optval, sizeof(info))) { retval = -EFAULT; goto out; } if (info.pr_policy & ~SCTP_PR_SCTP_MASK) goto out; if (info.pr_policy == SCTP_PR_SCTP_NONE) info.pr_value = 0; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); asoc->default_timetolive = info.pr_value; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); sp->default_timetolive = info.pr_value; } else { goto out; } retval = 0; out: 
return retval; } static int sctp_setsockopt_reconfig_supported(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { asoc->reconf_enable = !!params.assoc_value; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); sp->ep->reconf_enable = !!params.assoc_value; } else { goto out; } retval = 0; out: return retval; } static int sctp_setsockopt_enable_strreset(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) goto out; asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { asoc->strreset_enable = params.assoc_value; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); sp->ep->strreset_enable = params.assoc_value; } else { goto out; } retval = 0; out: return retval; } static int sctp_setsockopt_reset_streams(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_reset_streams *params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen < sizeof(struct sctp_reset_streams)) return -EINVAL; params = memdup_user(optval, optlen); if (IS_ERR(params)) return PTR_ERR(params); asoc = sctp_id2assoc(sk, params->srs_assoc_id); if (!asoc) goto out; retval = sctp_send_reset_streams(asoc, params); out: kfree(params); return retval; } static int sctp_setsockopt_reset_assoc(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_association *asoc; sctp_assoc_t associd; int retval = -EINVAL; if (optlen != sizeof(associd)) goto out; if (copy_from_user(&associd, optval, optlen)) { retval = 
-EFAULT; goto out; } asoc = sctp_id2assoc(sk, associd); if (!asoc) goto out; retval = sctp_send_reset_assoc(asoc); out: return retval; } static int sctp_setsockopt_add_streams(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_association *asoc; struct sctp_add_streams params; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } asoc = sctp_id2assoc(sk, params.sas_assoc_id); if (!asoc) goto out; retval = sctp_send_add_streams(asoc, &params); out: return retval; } /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve * socket options. Socket options are used to change the default * behavior of sockets calls. They are described in Section 7. * * The syntax is: * * ret = getsockopt(int sd, int level, int optname, void __user *optval, * int __user *optlen); * ret = setsockopt(int sd, int level, int optname, const void __user *optval, * int optlen); * * sd - the socket descript. * level - set to IPPROTO_SCTP for all SCTP options. * optname - the option name. * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of setsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->setsockopt(sk, level, optname, optval, optlen); goto out_nounlock; } lock_sock(sk); switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. 
*/ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx_old(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); break; case SCTP_EVENTS: retval = sctp_setsockopt_events(sk, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_setsockopt_autoclose(sk, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, 
optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_setsockopt_pr_supported(sk, optval, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); break; case SCTP_RECONFIG_SUPPORTED: retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); break; case SCTP_ENABLE_STREAM_RESET: retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); break; case SCTP_RESET_STREAMS: retval = sctp_setsockopt_reset_streams(sk, optval, optlen); break; case SCTP_RESET_ASSOC: retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); break; case SCTP_ADD_STREAMS: retval = sctp_setsockopt_add_streams(sk, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); out_nounlock: return retval; } /* 
API 3.1.6 connect() - UDP Style Syntax * * An application may use the connect() call in the UDP model to initiate an * association without sending data. * * The syntax is: * * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); * * sd: the socket descriptor to have a new association added to. * * nam: the address structure (either struct sockaddr_in or struct * sockaddr_in6 defined in RFC2553 [7]). * * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, int addr_len) { int err = 0; struct sctp_af *af; lock_sock(sk); pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); if (!af || addr_len < af->sockaddr_len) { err = -EINVAL; } else { /* Pass correct addr len to common routine (so it knows there * is only one address being passed. */ err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); } release_sock(sk); return err; } /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP * association from the accept queue of the endpoint. A new socket * descriptor will be returned from accept() to represent the newly * formed association. 
*/ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sock *newsk = NULL; struct sctp_association *asoc; long timeo; int error = 0; lock_sock(sk); sp = sctp_sk(sk); ep = sp->ep; if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; } if (!sctp_sstate(sk, LISTENING)) { error = -EINVAL; goto out; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); error = sctp_wait_for_accept(sk, timeo); if (error) goto out; /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); newsk = sp->pf->create_accept_sk(sk, asoc, kern); if (!newsk) { error = -ENOMEM; goto out; } /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); out: release_sock(sk); *err = error; return newsk; } /* The SCTP ioctl handler. */ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); /* * SEQPACKET-style sockets in LISTENING state are valid, for * SCTP, so only discard TCP-style sockets in LISTENING state. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned int amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read. */ amount = skb->len; } rc = put_user(amount, (int __user *)arg); break; } default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } /* This is the function which gets called during socket creation to * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. 
 */
/* Per-socket init: fills in sysctl-derived defaults for send params,
 * INIT, RTO, association and peer-address parameters, creates the
 * endpoint, and (optionally) enrolls the socket for auto-ASCONF.
 */
static int sctp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	sp = sctp_sk(sk);

	/* Initialize the SCTP per socket area. */
	switch (sk->sk_type) {
	case SOCK_SEQPACKET:
		sp->type = SCTP_SOCKET_UDP;
		break;
	case SOCK_STREAM:
		sp->type = SCTP_SOCKET_TCP;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk->sk_gso_type = SKB_GSO_SCTP;

	/* Initialize default send parameters. These parameters can be
	 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
	 */
	sp->default_stream = 0;
	sp->default_ppid = 0;
	sp->default_flags = 0;
	sp->default_context = 0;
	sp->default_timetolive = 0;

	sp->default_rcv_context = 0;
	sp->max_burst = net->sctp.max_burst;

	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;

	/* Initialize default setup parameters. These parameters
	 * can be modified with the SCTP_INITMSG socket option or
	 * overridden by the SCTP_INIT CMSG.
	 */
	sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
	sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
	sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;

	/* Initialize default RTO related parameters.  These parameters can
	 * be modified for with the SCTP_RTOINFO socket option.
	 */
	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
	sp->rtoinfo.srto_max     = net->sctp.rto_max;
	sp->rtoinfo.srto_min     = net->sctp.rto_min;

	/* Initialize default association related parameters. These parameters
	 * can be modified with the SCTP_ASSOCINFO socket option.
	 */
	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
	sp->assocparams.sasoc_number_peer_destinations = 0;
	sp->assocparams.sasoc_peer_rwnd = 0;
	sp->assocparams.sasoc_local_rwnd = 0;
	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;

	/* Initialize default event subscriptions. By default, all the
	 * options are off.
	 */
	memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));

	/* Default Peer Address Parameters.  These defaults can
	 * be modified via SCTP_PEER_ADDR_PARAMS
	 */
	sp->hbinterval  = net->sctp.hb_interval;
	sp->pathmaxrxt  = net->sctp.max_retrans_path;
	sp->pathmtu     = 0; /* allow default discovery */
	sp->sackdelay   = net->sctp.sack_timeout;
	sp->sackfreq	= 2;
	sp->param_flags = SPP_HB_ENABLE |
			  SPP_PMTUD_ENABLE |
			  SPP_SACKDELAY_ENABLE;

	/* If enabled no SCTP message fragmentation will be performed.
	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
	 */
	sp->disable_fragments = 0;

	/* Enable Nagle algorithm by default.  */
	sp->nodelay           = 0;

	sp->recvrcvinfo = 0;
	sp->recvnxtinfo = 0;

	/* Enable by default. */
	sp->v4mapped          = 1;

	/* Auto-close idle associations after the configured
	 * number of seconds.  A value of 0 disables this
	 * feature.  Configure through the SCTP_AUTOCLOSE socket option,
	 * for UDP-style sockets only.
	 */
	sp->autoclose         = 0;

	/* User specified fragmentation limit. */
	sp->user_frag         = 0;

	sp->adaptation_ind = 0;

	sp->pf = sctp_get_pf_specific(sk->sk_family);

	/* Control variables for partial data delivery. */
	atomic_set(&sp->pd_mode, 0);
	skb_queue_head_init(&sp->pd_lobby);
	sp->frag_interleave = 0;

	/* Create a per socket endpoint structure.  Even if we
	 * change the data structure relationships, this may still
	 * be useful for storing pre-connect address information.
	 */
	sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!sp->ep)
		return -ENOMEM;

	sp->hmac = NULL;

	sk->sk_destruct = sctp_destruct_sock;

	SCTP_DBG_OBJCNT_INC(sock);

	local_bh_disable();
	percpu_counter_inc(&sctp_sockets_allocated);
	sock_prot_inuse_add(net, sk->sk_prot, 1);

	/* Nothing can fail after this block, otherwise
	 * sctp_destroy_sock() will be called without addr_wq_lock held
	 */
	if (net->sctp.default_auto_asconf) {
		/* addr_wq_lock serializes access to the per-net
		 * auto_asconf_splist this socket is added to.
		 */
		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
		list_add_tail(&sp->auto_asconf_list,
		    &net->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
	} else {
		sp->do_auto_asconf = 0;
	}

	local_bh_enable();

	return 0;
}

/* Cleanup any SCTP per socket resources.
Must be called with * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true */ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. */ sp = sctp_sk(sk); /* This could happen during socket init, thus we bail out * early, since the rest of the below is not setup either. */ if (sp->ep == NULL) return; if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ static void sctp_destruct_sock(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. */ crypto_free_shash(sp->hmac); inet_sock_destruct(sk); } /* API 4.1.7 shutdown() - TCP Style Syntax * int shutdown(int socket, int how); * * sd - the socket descriptor of the association to be closed. * how - Specifies the type of shutdown. The values are * as follows: * SHUT_RD * Disables further receive operations. No SCTP * protocol action is taken. * SHUT_WR * Disables further send operations, and initiates * the SCTP shutdown sequence. * SHUT_RDWR * Disables further send and receive operations * and initiates the SCTP shutdown sequence. 
*/ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; if (!sctp_style(sk, TCP)) return; ep = sctp_sk(sk)->ep; if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { struct sctp_association *asoc; sk->sk_state = SCTP_SS_CLOSING; asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_primitive_SHUTDOWN(net, asoc, NULL); } } int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, struct sctp_info *info) { struct sctp_transport *prim; struct list_head *pos; int mask; memset(info, 0, sizeof(*info)); if (!asoc) { struct sctp_sock *sp = sctp_sk(sk); info->sctpi_s_autoclose = sp->autoclose; info->sctpi_s_adaptation_ind = sp->adaptation_ind; info->sctpi_s_pd_point = sp->pd_point; info->sctpi_s_nodelay = sp->nodelay; info->sctpi_s_disable_fragments = sp->disable_fragments; info->sctpi_s_v4mapped = sp->v4mapped; info->sctpi_s_frag_interleave = sp->frag_interleave; info->sctpi_s_type = sp->type; return 0; } info->sctpi_tag = asoc->c.my_vtag; info->sctpi_state = asoc->state; info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); info->sctpi_instrms = asoc->stream.incnt; info->sctpi_outstrms = asoc->stream.outcnt; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) info->sctpi_outqueue++; info->sctpi_overall_error = asoc->overall_error_count; info->sctpi_max_burst = asoc->max_burst; info->sctpi_maxseg = asoc->frag_point; info->sctpi_peer_rwnd = asoc->peer.rwnd; info->sctpi_peer_tag = asoc->c.peer_vtag; mask = asoc->peer.ecn_capable << 1; mask = (mask | asoc->peer.ipv4_address) << 1; mask = (mask | asoc->peer.ipv6_address) << 1; mask = (mask | asoc->peer.hostname_address) << 1; mask = (mask | asoc->peer.asconf_capable) << 1; mask = (mask | asoc->peer.prsctp_capable) << 1; mask = (mask | asoc->peer.auth_capable); 
info->sctpi_peer_capable = mask; mask = asoc->peer.sack_needed << 1; mask = (mask | asoc->peer.sack_generation) << 1; mask = (mask | asoc->peer.zero_window_announced); info->sctpi_peer_sack = mask; info->sctpi_isacks = asoc->stats.isacks; info->sctpi_osacks = asoc->stats.osacks; info->sctpi_opackets = asoc->stats.opackets; info->sctpi_ipackets = asoc->stats.ipackets; info->sctpi_rtxchunks = asoc->stats.rtxchunks; info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; info->sctpi_idupchunks = asoc->stats.idupchunks; info->sctpi_gapcnt = asoc->stats.gapcnt; info->sctpi_ouodchunks = asoc->stats.ouodchunks; info->sctpi_iuodchunks = asoc->stats.iuodchunks; info->sctpi_oodchunks = asoc->stats.oodchunks; info->sctpi_iodchunks = asoc->stats.iodchunks; info->sctpi_octrlchunks = asoc->stats.octrlchunks; info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; prim = asoc->peer.primary_path; memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr)); info->sctpi_p_state = prim->state; info->sctpi_p_cwnd = prim->cwnd; info->sctpi_p_srtt = prim->srtt; info->sctpi_p_rto = jiffies_to_msecs(prim->rto); info->sctpi_p_hbinterval = prim->hbinterval; info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); info->sctpi_p_ssthresh = prim->ssthresh; info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; info->sctpi_p_flight_size = prim->flight_size; info->sctpi_p_error = prim->error_count; return 0; } EXPORT_SYMBOL_GPL(sctp_get_sctp_info); /* use callback to avoid exporting the core structure */ int sctp_transport_walk_start(struct rhashtable_iter *iter) { int err; rhltable_walk_enter(&sctp_transport_hashtable, iter); err = rhashtable_walk_start(iter); if (err && err != -EAGAIN) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); return err; } return 0; } void sctp_transport_walk_stop(struct rhashtable_iter *iter) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); } struct sctp_transport 
*sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter) { struct sctp_transport *t; t = rhashtable_walk_next(iter); for (; t; t = rhashtable_walk_next(iter)) { if (IS_ERR(t)) { if (PTR_ERR(t) == -EAGAIN) continue; break; } if (net_eq(sock_net(t->asoc->base.sk), net) && t->asoc->peer.primary_path == t) break; } return t; } struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos) { void *obj = SEQ_START_TOKEN; while (pos && (obj = sctp_transport_get_next(net, iter)) && !IS_ERR(obj)) pos--; return obj; } int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p) { int err = 0; int hash = 0; struct sctp_ep_common *epb; struct sctp_hashbucket *head; for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { read_lock_bh(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } read_unlock_bh(&head->lock); } return err; } EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p) { struct sctp_transport *transport; int err; rcu_read_lock(); transport = sctp_addrs_lookup_transport(net, laddr, paddr); rcu_read_unlock(); if (!transport) return -ENOENT; err = cb(transport, p); sctp_transport_put(transport); return err; } EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), int (*cb_done)(struct sctp_transport *, void *), struct net *net, int *pos, void *p) { struct rhashtable_iter hti; struct sctp_transport *tsp; int ret; again: ret = sctp_transport_walk_start(&hti); if (ret) return ret; tsp = sctp_transport_get_idx(net, &hti, *pos + 1); for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { if (!sctp_transport_hold(tsp)) continue; ret = cb(tsp, p); if (ret) break; (*pos)++; sctp_transport_put(tsp); } 
sctp_transport_walk_stop(&hti); if (ret) { if (cb_done && !cb_done(tsp, p)) { (*pos)++; sctp_transport_put(tsp); goto again; } sctp_transport_put(tsp); } return ret; } EXPORT_SYMBOL_GPL(sctp_for_each_transport); /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an * association, including association state, peer receiver window size, * number of unacked data chunks, and number of data chunks pending * receipt. This information is read-only. */ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_status status; struct sctp_association *asoc = NULL; struct sctp_transport *transport; sctp_assoc_t associd; int retval = 0; if (len < sizeof(status)) { retval = -EINVAL; goto out; } len = sizeof(status); if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } associd = status.sstat_assoc_id; asoc = sctp_id2assoc(sk, associd); if (!asoc) { retval = -EINVAL; goto out; } transport = asoc->peer.primary_path; status.sstat_assoc_id = sctp_assoc2id(asoc); status.sstat_state = sctp_assoc_to_state(asoc); status.sstat_rwnd = asoc->peer.rwnd; status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); status.sstat_instrms = asoc->stream.incnt; status.sstat_outstrms = asoc->stream.outcnt; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, transport->af_specific->sockaddr_len); /* Map ipv4 address into v4-mapped-on-v6 address. 
*/ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)&status.sstat_primary.spinfo_address); status.sstat_primary.spinfo_state = transport->state; status.sstat_primary.spinfo_cwnd = transport->cwnd; status.sstat_primary.spinfo_srtt = transport->srtt; status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); status.sstat_primary.spinfo_mtu = transport->pathmtu; if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) status.sstat_primary.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", __func__, len, status.sstat_state, status.sstat_rwnd, status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) * * Applications can retrieve information about a specific peer address * of an association, including its reachability state, congestion * window, and retransmission timer values. This information is * read-only. 
*/ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrinfo pinfo; struct sctp_transport *transport; int retval = 0; if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } len = sizeof(pinfo); if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, pinfo.spinfo_assoc_id); if (!transport) return -EINVAL; pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); pinfo.spinfo_state = transport->state; pinfo.spinfo_cwnd = transport->cwnd; pinfo.spinfo_srtt = transport->srtt; pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); pinfo.spinfo_mtu = transport->pathmtu; if (pinfo.spinfo_state == SCTP_UNKNOWN) pinfo.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &pinfo, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->disable_fragments == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) * * This socket option is used to specify various notifications and * ancillary data the user wishes to receive. 
*/ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len == 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) { /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) return -EFAULT; return 0; } /* Helper routine to branch off an association to a new socket. */ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; if (!asoc) return -EINVAL; /* If there is a thread waiting on more sndbuf space for * sending on this asoc, it cannot be peeled. */ if (waitqueue_active(&asoc->wait)) return -EBUSY; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. 
*/ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } EXPORT_SYMBOL(sctp_do_peeloff); static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff, struct file **newfile, unsigned flags) { struct socket *newsock; int retval; retval = sctp_do_peeloff(sk, peeloff->associd, &newsock); if (retval < 0) goto out; /* Map the socket to an unused fd that can be returned to the user. */ retval = get_unused_fd_flags(flags & SOCK_CLOEXEC); if (retval < 0) { sock_release(newsock); goto out; } *newfile = sock_alloc_file(newsock, 0, NULL); if (IS_ERR(*newfile)) { put_unused_fd(retval); sock_release(newsock); retval = PTR_ERR(*newfile); *newfile = NULL; return retval; } pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, retval); peeloff->sd = retval; if (flags & SOCK_NONBLOCK) (*newfile)->f_flags |= O_NONBLOCK; out: return retval; } static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_arg_t peeloff; struct file *newfile = NULL; int retval = 0; if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0); if (retval < 0) goto out; /* Return the fd mapped to the new socket. 
*/ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_flags_arg_t peeloff; struct file *newfile = NULL; int retval = 0; if (len < sizeof(sctp_peeloff_flags_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_flags_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg, &newfile, peeloff.flags); if (retval < 0) goto out; /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. 
If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. Note * also, that if delayed sack is enabled and this * value is set to 0, no change is made to the last * recorded delayed sack timer value. * * spp_flags - These flags are used to control various features * on an association. The flag field may contain * zero or more of the following options. * * SPP_HB_ENABLE - Enable heartbeats on the * specified address. Note that if the address * field is empty all addresses for the association * have heartbeats enabled upon them. * * SPP_HB_DISABLE - Disable heartbeats on the * speicifed address. Note that if the address * field is empty all addresses for the association * will have their heartbeats disabled. Note also * that SPP_HB_ENABLE and SPP_HB_DISABLE are * mutually exclusive, only one of these two should * be specified. Enabling both fields will have * undetermined results. * * SPP_HB_DEMAND - Request a user initiated heartbeat * to be made immediately. * * SPP_PMTUD_ENABLE - This field will enable PMTU * discovery upon the specified address. Note that * if the address feild is empty then all addresses * on the association are effected. * * SPP_PMTUD_DISABLE - This field will disable PMTU * discovery upon the specified address. 
 *                     Note that
 *                     if the address field is empty then all addresses
 *                     on the association are effected.  Not also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
/* getsockopt(SCTP_PEER_ADDR_PARAMS): report heartbeat/PMTU/sack-delay
 * parameters at transport, association, or socket scope depending on
 * which of spp_address / spp_assoc_id the caller filled in.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_paddrparams params;
	struct sctp_transport *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_paddrparams))
		return -EINVAL;
	len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			pr_debug("%s: failed no transport\n", __func__);
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
		pr_debug("%s: failed no association\n", __func__);
		return -EINVAL;
	}

	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags = trans->param_flags;
	} else if (asoc) {
		/* Fetch association values. */
		params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
		params.spp_pathmtu = asoc->pathmtu;
		params.spp_pathmaxrxt = asoc->pathmaxrxt;
		params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags = asoc->param_flags;
	} else {
		/* Fetch socket values. */
		params.spp_hbinterval = sp->hbinterval;
		params.spp_pathmtu = sp->pathmtu;
		params.spp_sackdelay = sp->sackdelay;
		params.spp_pathmaxrxt = sp->pathmaxrxt;

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags = sp->param_flags;
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will effect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id - This parameter, indicates which association the user
 *    is performing an action upon.
 *    Note that if this field's value is
 *    zero then the endpoints default value is changed (effecting future
 *    associations only).
 *
 * sack_delay - This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq - This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
/* getsockopt(SCTP_DELAYED_SACK): report sack delay/frequency at assoc or
 * socket scope.  Also accepts the deprecated struct sctp_assoc_value
 * layout (warns, and copies back only that struct's size).
 */
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_sack_info params;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len >= sizeof(struct sctp_sack_info)) {
		len = sizeof(struct sctp_sack_info);

		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else if (len == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		/* Fetch association values. */
		if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = jiffies_to_msecs(
				asoc->sackdelay);
			params.sack_freq = asoc->sackfreq;

		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	} else {
		/* Fetch socket values. */
		if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay  = sp->sackdelay;
			params.sack_freq = sp->sackfreq;
		} else {
			params.sack_delay  = 0;
			params.sack_freq = 1;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are effected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
/* getsockopt(SCTP_INITMSG): copy the socket's default INIT parameters. */
static int sctp_getsockopt_initmsg(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}

/* getsockopt(SCTP_GET_PEER_ADDRS): copy every peer transport address of
 * the given association into the flexible tail of struct sctp_getaddrs,
 * then write the address count and total byte count back.
 */
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	size_t space_left;
	int bytes_copied;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/* For UDP-style sockets, id specifies the association to query.  */
	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
	if (!asoc)
		return -EINVAL;

	/* Addresses are written after the fixed header of the user struct. */
	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(&temp, &from->ipaddr, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen)
			return -ENOMEM;
		if (copy_to_user(to, &temp, addrlen))
			return -EFAULT;
		to += addrlen;
		cnt++;
		space_left -= addrlen;
	}

	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
		return -EFAULT;
	bytes_copied = ((char __user *)to) - optval;
	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
}

/* Gather the global (wildcard-bound) local address list into the kernel
 * buffer @to, skipping family-mismatched entries.  Defaults each entry's
 * port to @port when unset.  Returns the number of addresses copied or
 * -ENOMEM when @space_left is exhausted.
 */
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;
	struct net *net = sock_net(sk);

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;

		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		if ((PF_INET6 == sk->sk_family) &&
		    inet_v6_ipv6only(sk) &&
		    (AF_INET == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sctp_sk(sk), &temp);

		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}

/* getsockopt(SCTP_GET_LOCAL_ADDRS): copy the locally bound addresses (of
 * the endpoint when assoc_id == 0, else of that association) to user
 * space.  Builds the list in a kernel buffer first so a single
 * copy_to_user() suffices.
 */
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	size_t space_left;
	int bytes_copied = 0;
	void *addrs;
	void *buf;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/*
	 * For UDP-style sockets, id specifies the association to query.
	 * If the id field is set to the value '0' then the locally bound
	 * addresses are returned without regard to any particular
	 * association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	/* Staging buffer sized by the caller-supplied length; allocation
	 * failure (including absurd user lengths) yields -ENOMEM.
	 */
	addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(sk, &addr->a)) {
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
						space_left, &bytes_copied);
			if (cnt < 0) {
				err = cnt;
				goto out;
			}
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen) {
			err = -ENOMEM; /*fixme: right error?*/
			goto out;
		}
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt++;
		space_left -= addrlen;
	}

copy_getaddrs:
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:
	kfree(addrs);
	return err;
}

/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
/* getsockopt(SCTP_PRIMARY_ADDR): report the peer primary path address. */
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_prim prim;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_prim))
		return -EINVAL;

	len = sizeof(struct sctp_prim);

	if (copy_from_user(&prim, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.primary_path)
		return -ENOTCONN;

	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
		asoc->peer.primary_path->af_specific->sockaddr_len);

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
			(union sctp_addr *)&prim.ssp_addr);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &prim, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.11  Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
 *
 * Requests that the local endpoint set the specified Adaptation Layer
 * Indication parameter for all future INIT and INIT-ACK exchanges.
*/ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_setadaptation adaptation; if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; len = sizeof(struct sctp_setadaptation); adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; return 0; } /* * * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. * The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. * * For getsockopt, it get the default sctp_sndrcvinfo structure. 
*/ static int sctp_getsockopt_default_send_param(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.sinfo_stream = asoc->default_stream; info.sinfo_flags = asoc->default_flags; info.sinfo_ppid = asoc->default_ppid; info.sinfo_context = asoc->default_context; info.sinfo_timetolive = asoc->default_timetolive; } else { info.sinfo_stream = sp->default_stream; info.sinfo_flags = sp->default_flags; info.sinfo_ppid = sp->default_ppid; info.sinfo_context = sp->default_context; info.sinfo_timetolive = sp->default_timetolive; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.snd_sid = asoc->default_stream; info.snd_flags = asoc->default_flags; info.snd_ppid = asoc->default_ppid; info.snd_context = asoc->default_context; } else { info.snd_sid = sp->default_stream; info.snd_flags = sp->default_flags; info.snd_ppid = sp->default_ppid; info.snd_context = sp->default_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* * * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_getsockopt_nodelay(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->nodelay == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. 
* */ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; len = sizeof(struct sctp_rtoinfo); if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association. */ if (asoc) { rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); } else { /* Values corresponding to the endpoint. */ struct sctp_sock *sp = sctp_sk(sk); rtoinfo.srto_initial = sp->rtoinfo.srto_initial; rtoinfo.srto_max = sp->rtoinfo.srto_max; rtoinfo.srto_min = sp->rtoinfo.srto_min; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &rtoinfo, len)) return -EFAULT; return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. 
* */ static int sctp_getsockopt_associnfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; struct list_head *pos; int cnt = 0; if (len < sizeof (struct sctp_assocparams)) return -EINVAL; len = sizeof(struct sctp_assocparams); if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values correspoinding to the specific association */ if (asoc) { assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt++; } assocparams.sasoc_number_peer_destinations = cnt; } else { /* Values corresponding to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; assocparams.sasoc_cookie_life = sp->assocparams.sasoc_cookie_life; assocparams.sasoc_number_peer_destinations = sp->assocparams. sasoc_number_peer_destinations; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &assocparams, len)) return -EFAULT; return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. 
*/ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sp->v4mapped; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.29. Set or Get the default context (SCTP_CONTEXT) * (chapter and verse is quoted at sctp_setsockopt_context()) */ static int sctp_getsockopt_context(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->default_rcv_context; } else { params.assoc_value = sp->default_rcv_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &params, len)) return -EFAULT; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). 
* * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. 
 *  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
/* getsockopt(SCTP_FRAGMENT_INTERLEAVE): return the interleave level. */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval,
					       int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.25.  Set or Get the sctp partial delivery point
 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
 */
/* getsockopt(SCTP_PARTIAL_DELIVERY_POINT): return pd_point as a u32. */
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	u32 val;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	val = sctp_sk(sk)->pd_point;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
 */
/* getsockopt(SCTP_MAX_BURST): report max_burst at association or socket
 * scope; also accepts the deprecated bare-int layout.
 * NOTE(review): unlike sctp_getsockopt_maxseg() above, optlen is never
 * written back here — confirm this asymmetry is intentional.
 */
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
				    char __user *optval,
				    int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->max_burst;
	} else
		params.assoc_value = sp->max_burst;

	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;

}

/* getsockopt(SCTP_HMAC_IDENT): copy the endpoint's negotiated HMAC
 * identifier list (host byte order) into struct sctp_hmacalgo.
 * Returns -EACCES when SCTP AUTH is disabled on the endpoint.
 */
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo  __user *p = (void __user *)optval;
	struct sctp_hmac_algo_param *hmacs;
	__u16 data_len = 0;
	u32 num_idents;
	int i;

	if (!ep->auth_enable)
		return -EACCES;

	hmacs = ep->auth_hmacs_list;
	/* Parameter length covers the header; subtract it to get id bytes. */
	data_len = ntohs(hmacs->param_hdr.length) -
		   sizeof(struct sctp_paramhdr);

	if (len < sizeof(struct sctp_hmacalgo) + data_len)
		return -EINVAL;

	len = sizeof(struct sctp_hmacalgo) + data_len;
	num_idents = data_len / sizeof(u16);

	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_idents, &p->shmac_num_idents))
		return -EFAULT;
	for (i = 0; i < num_idents; i++) {
		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);

		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
			return -EFAULT;
	}
	return 0;
}

/* getsockopt(SCTP_AUTH_ACTIVE_KEY): report the active AUTH key number at
 * association or endpoint scope.  -EACCES when AUTH is disabled.
 */
static int sctp_getsockopt_active_key(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = ep->active_key_id;

	len = sizeof(struct sctp_authkeyid);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* getsockopt(SCTP_PEER_AUTH_CHUNKS): copy the chunk-type list the peer
 * requires to be authenticated.  -EACCES when AUTH is disabled.
 */
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc)
		return -EINVAL;

	ch = asoc->peer.peer_chunks;
	if (!ch)
		goto num;

	/* See if the user provided enough room for all the data */
	/* NOTE(review): this checks len against num_chunks alone, while the
	 * local variant below requires sizeof(struct sctp_authchunks) +
	 * num_chunks — confirm the asymmetry is intentional.
	 */
	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}

/* getsockopt(SCTP_LOCAL_AUTH_CHUNKS): copy the chunk-type list this
 * endpoint (or association) requires to be authenticated.  -EACCES when
 * AUTH is disabled.
 */
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
	else
		ch = ep->auth_chunk_list;

	if (!ch)
		goto num;

	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;

	return 0;
}

/*
 * 8.2.5.
Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 *
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket.  The option value is an uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	/* Only meaningful for one-to-many (UDP-style) sockets. */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	/* Count the endpoint's associations. */
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry as description
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	/* Report 1 only when auto-ASCONF is on and the endpoint is
	 * bound to the wildcard address.
	 */
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 8.2.6. Get the Current Identifiers of Associations
 *        (SCTP_GET_ASSOC_ID_LIST)
 *
 * This option gets the current list of SCTP association identifiers of
 * the SCTP associations handled by a one-to-many style socket.
*/
static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
				     char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_assoc_ids *ids;
	u32 num = 0;

	/* One-to-one (TCP-style) sockets have at most one association. */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(struct sctp_assoc_ids))
		return -EINVAL;

	/* First pass: count associations to size the reply. */
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		num++;
	}

	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
		return -EINVAL;

	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;

	/* Build the list in a kernel buffer, then copy out in one go. */
	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (unlikely(!ids))
		return -ENOMEM;

	ids->gaids_number_of_ids = num;
	num = 0;
	/* Second pass: record each association id. */
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		ids->gaids_assoc_id[num++] = asoc->assoc_id;
	}

	if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
		kfree(ids);
		return -EFAULT;
	}

	kfree(ids);
	return 0;
}

/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to fetch the partially failed threshold for one or all
 * transports in an association.
See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
/* NOTE: parameter order here is (sk, optval, len, optlen), unlike the
 * other getsockopt helpers in this file — the caller passes them in
 * this order, so it must be preserved.
 */
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
					    char __user *optval,
					    int len,
					    int __user *optlen)
{
	struct sctp_paddrthlds val;
	struct sctp_transport *trans;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_paddrthlds))
		return -EINVAL;
	len = sizeof(struct sctp_paddrthlds);
	if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
		return -EFAULT;

	if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
		/* Wildcard address: report the association-wide values. */
		asoc = sctp_id2assoc(sk, val.spt_assoc_id);
		if (!asoc)
			return -ENOENT;

		val.spt_pathpfthld = asoc->pf_retrans;
		val.spt_pathmaxrxt = asoc->pathmaxrxt;
	} else {
		/* Specific address: report that transport's values. */
		trans = sctp_addr_id2transport(sk, &val.spt_address,
					       val.spt_assoc_id);
		if (!trans)
			return -ENOENT;

		val.spt_pathmaxrxt = trans->pathmaxrxt;
		val.spt_pathpfthld = trans->pf_retrans;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * SCTP_GET_ASSOC_STATS
 *
 * This option retrieves local per endpoint statistics.
It is modeled * after OpenSolaris' implementation */ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. 
obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; if (put_user(len, optlen)) return -EFAULT; pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvrcvinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvnxtinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_pr_supported(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(params)) { retval = -EINVAL; goto out; } len = sizeof(params); if (copy_from_user(&params, optval, len)) goto out; asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { params.assoc_value = asoc->prsctp_enable; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); params.assoc_value = sp->ep->prsctp_enable; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &params, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EFAULT; 
if (len < sizeof(info)) { retval = -EINVAL; goto out; } len = sizeof(info); if (copy_from_user(&info, optval, len)) goto out; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); info.pr_value = asoc->default_timetolive; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); info.pr_policy = SCTP_PR_POLICY(sp->default_flags); info.pr_value = sp->default_timetolive; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &info, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prstatus params; struct sctp_association *asoc; int policy; int retval = -EINVAL; if (len < sizeof(params)) goto out; len = sizeof(params); if (copy_from_user(&params, optval, len)) { retval = -EFAULT; goto out; } policy = params.sprstat_policy; if (policy & ~SCTP_PR_SCTP_MASK) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); if (!asoc) goto out; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { params.sprstat_abandoned_unsent += asoc->abandoned_unsent[policy]; params.sprstat_abandoned_sent += asoc->abandoned_sent[policy]; } } else { params.sprstat_abandoned_unsent = asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; params.sprstat_abandoned_sent = asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; } if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &params, len)) { retval = -EFAULT; goto out; } retval = 0; out: return retval; } static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_stream_out *streamout; struct sctp_association *asoc; struct sctp_prstatus params; int retval = -EINVAL; int policy; if (len < sizeof(params)) goto out; len = 
sizeof(params); if (copy_from_user(&params, optval, len)) { retval = -EFAULT; goto out; } policy = params.sprstat_policy; if (policy & ~SCTP_PR_SCTP_MASK) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); if (!asoc || params.sprstat_sid >= asoc->stream.outcnt) goto out; streamout = &asoc->stream.out[params.sprstat_sid]; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { params.sprstat_abandoned_unsent += streamout->abandoned_unsent[policy]; params.sprstat_abandoned_sent += streamout->abandoned_sent[policy]; } } else { params.sprstat_abandoned_unsent = streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)]; params.sprstat_abandoned_sent = streamout->abandoned_sent[__SCTP_PR_INDEX(policy)]; } if (put_user(len, optlen) || copy_to_user(optval, &params, len)) { retval = -EFAULT; goto out; } retval = 0; out: return retval; } static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(params)) { retval = -EINVAL; goto out; } len = sizeof(params); if (copy_from_user(&params, optval, len)) goto out; asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { params.assoc_value = asoc->reconf_enable; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); params.assoc_value = sp->ep->reconf_enable; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &params, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_enable_strreset(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(params)) { retval = -EINVAL; goto out; } len = sizeof(params); if (copy_from_user(&params, optval, len)) goto out; asoc = 
sctp_id2assoc(sk, params.assoc_id); if (asoc) { params.assoc_value = asoc->strreset_enable; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); params.assoc_value = sp->ep->strreset_enable; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &params, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int retval = 0; int len; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of getsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->getsockopt(sk, level, optname, optval, optlen); return retval; } if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case SCTP_STATUS: retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_getsockopt_disable_fragments(sk, len, optval, optlen); break; case SCTP_EVENTS: retval = sctp_getsockopt_events(sk, len, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF: retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF_FLAGS: retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_getsockopt_peer_addr_params(sk, len, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_getsockopt_delayed_ack(sk, len, optval, optlen); break; case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: retval = sctp_getsockopt_peer_addrs(sk, len, 
optval, optlen); break; case SCTP_GET_LOCAL_ADDRS: retval = sctp_getsockopt_local_addrs(sk, len, optval, optlen); break; case SCTP_SOCKOPT_CONNECTX3: retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_getsockopt_default_send_param(sk, len, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_getsockopt_default_sndinfo(sk, len, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); break; case SCTP_NODELAY: retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDR_INFO: retval = sctp_getsockopt_peer_addr_info(sk, len, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_getsockopt_adaptation_layer(sk, len, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; case SCTP_AUTH_KEY: case SCTP_AUTH_CHUNK: case SCTP_AUTH_DELETE_KEY: retval = -EOPNOTSUPP; break; case SCTP_HMAC_IDENT: retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_getsockopt_active_key(sk, len, optval, optlen); break; case SCTP_PEER_AUTH_CHUNKS: retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, optlen); break; case SCTP_LOCAL_AUTH_CHUNKS: retval = 
sctp_getsockopt_local_auth_chunks(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_NUMBER: retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_ID_LIST: retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); break; case SCTP_GET_ASSOC_STATS: retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_getsockopt_default_prinfo(sk, len, optval, optlen); break; case SCTP_PR_ASSOC_STATUS: retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, optlen); break; case SCTP_PR_STREAM_STATUS: retval = sctp_getsockopt_pr_streamstatus(sk, len, optval, optlen); break; case SCTP_RECONFIG_SUPPORTED: retval = sctp_getsockopt_reconfig_supported(sk, len, optval, optlen); break; case SCTP_ENABLE_STREAM_RESET: retval = sctp_getsockopt_enable_strreset(sk, len, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); return retval; } static int sctp_hash(struct sock *sk) { /* STUB */ return 0; } static void sctp_unhash(struct sock *sk) { /* STUB */ } /* Check if port is acceptable. Possibly find first available port. * * The port hash table (contained in the 'global' SCTP protocol storage * returned by struct sctp_protocol *sctp_get_protocol()). The hash * table is an array of 4096 lists (sctp_bind_hashbucket). 
Each * list (the list number is the port number hashed out, so as you * would expect from a hash function, all the ports in a given list have * such a number that hashes out to the same list number; you were * expecting that, right?); so each list has a set of ports, with a * link to the socket (struct sock) that uses it, the port number and * a fastreuse flag (FIXME: NPI ipg). */ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; int ret; snum = ntohs(addr->v4.sin_port); pr_debug("%s: begins, snum:%d\n", __func__, snum); local_bh_disable(); if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; unsigned int rover; struct net *net = sock_net(sk); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; do { rover++; if ((rover < low) || (rover > high)) rover = low; if (inet_is_local_reserved_port(net, rover)) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: spin_unlock(&head->lock); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) goto fail; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's * mutex. */ snum = rover; } else { /* We are given an specific port number; we verify * that it is not being used. If it is used, we will * exahust the search in the hash list corresponding * to the port number (snum) - we detect that with the * port iterator, pp being NULL. 
*/ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; } } pp = NULL; goto pp_not_found; pp_found: if (!hlist_empty(&pp->owner)) { /* We had a port hash table hit - there is an * available port (pp != NULL) and it is being * used by other socket (pp->owner not empty); that other * socket is going to be sk2. */ int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); if (pp->fastreuse && sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port * (pp->port) [via the pointers bind_next and * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, * we get the endpoint they describe and run through * the endpoint's list of IP (v4 or v6) addresses, * comparing each of the addresses with the address of * the socket sk. If we find a match, then that means * that this port/socket (sk) combination are already * in an endpoint. */ sk_for_each_bound(sk2, &pp->owner) { struct sctp_endpoint *ep2; ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || (reuse && sk2->sk_reuse && sk2->sk_state != SCTP_SS_LISTENING)) continue; if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sctp_sk(sk2), sctp_sk(sk))) { ret = (long)sk2; goto fail_unlock; } } pr_debug("%s: found a match\n", __func__); } pp_not_found: /* If there was a hash table miss, create a new port. */ ret = 1; if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) goto fail_unlock; /* In either case (hit or miss), make sure fastreuse is 1 only * if sk->sk_reuse is too (that is, if the caller requested * SO_REUSEADDR on this socket -sk-). 
*/ if (hlist_empty(&pp->owner)) { if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table * entry, tie the socket list information with the rest of the * sockets FIXME: Blurry, NPI (ipg). */ success: if (!sctp_sk(sk)->bind_hash) { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &pp->owner); sctp_sk(sk)->bind_hash = pp; } ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral * port is requested. */ static int sctp_get_port(struct sock *sk, unsigned short snum) { union sctp_addr addr; struct sctp_af *af = sctp_sk(sk)->pf->af; /* Set up a dummy address struct from the sk. */ af->from_sk(&addr, sk); addr.v4.sin_port = htons(snum); /* Note: sk->sk_num gets filled in if ephemeral port request. */ return !!sctp_get_port_local(sk, &addr); } /* * Move a socket to LISTENING state. */ static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct crypto_shash *tfm = NULL; char alg[32]; /* Allocate HMAC for generating cookie. */ if (!sp->hmac && sp->sctp_hmac_alg) { sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); tfm = crypto_alloc_shash(alg, 0, 0); if (IS_ERR(tfm)) { net_info_ratelimited("failed to load transform for %s: %ld\n", sp->sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; } /* * If a bind() or sctp_bindx() is not called prior to a listen() * call that allows new associations to be accepted, the system * picks an ephemeral port and will choose an address set equivalent * to binding with a wildcard address. * * This is not currently spelled out in the SCTP sockets * extensions draft, but follows the practice as seen in TCP * sockets. 
* */ sk->sk_state = SCTP_SS_LISTENING; if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) return -EAGAIN; } else { if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { sk->sk_state = SCTP_SS_CLOSED; return -EADDRINUSE; } } sk->sk_max_ack_backlog = backlog; sctp_hash_endpoint(ep); return 0; } /* * 4.1.3 / 5.1.3 listen() * * By default, new associations are not accepted for UDP style sockets. * An application uses listen() to mark a socket as being able to * accept new associations. * * On TCP style sockets, applications use listen() to ready the SCTP * endpoint for accepting inbound associations. * * On both types of endpoints a backlog of '0' disables listening. * * Move a socket to LISTENING state. */ int sctp_inet_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct sctp_endpoint *ep = sctp_sk(sk)->ep; int err = -EINVAL; if (unlikely(backlog < 0)) return err; lock_sock(sk); /* Peeled-off sockets are not allowed to listen(). */ if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) goto out; if (sock->state != SS_UNCONNECTED) goto out; if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) goto out; /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) goto out; err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; if (sk->sk_reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } /* If we are already listening, just update the backlog */ if (sctp_sstate(sk, LISTENING)) sk->sk_max_ack_backlog = backlog; else { err = sctp_listen_start(sk, backlog); if (err) goto out; } err = 0; out: release_sock(sk); return err; } /* * This function is done by modeling the current datagram_poll() and the * tcp_poll(). Note that, based on these implementations, we don't * lock the socket in this function, even though it seems that, * ideally, locking or some other mechanisms can be used to ensure * the integrity of the counters (sndbuf and wmem_alloc) used * in this place. 
We assume that we don't need locks either until proven * otherwise. * * Another thing to note is that we include the Async I/O support * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. */ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); unsigned int mask; poll_wait(file, sk_sleep(sk), wait); sock_rps_record_flow(sk); /* A TCP-style listening socket becomes readable when the accept queue * is not empty. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? (POLLIN | POLLRDNORM) : 0; mask = 0; /* Is there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* Is it readable? Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* The association is either gone or not ready. */ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) return mask; /* Is it writable? */ if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and * before the bit is set. This could cause a lost I/O * signal. tcp_poll() has a race breaker for this race * condition. Based on their implementation, we put * in the following code to cover it as well. 
*/ if (sctp_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } return mask; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp; pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); if (pp) { SCTP_DBG_OBJCNT_INC(bind_bucket); pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; } /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { if (pp && hlist_empty(&pp->owner)) { __hlist_del(&pp->node); kmem_cache_free(sctp_bucket_cachep, pp); SCTP_DBG_OBJCNT_DEC(bind_bucket); } } /* Release this socket's reference to a local port. */ static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp; spin_lock(&head->lock); pp = sctp_sk(sk)->bind_hash; __sk_del_bind_node(sk); sctp_sk(sk)->bind_hash = NULL; inet_sk(sk)->inet_num = 0; sctp_bucket_destroy(pp); spin_unlock(&head->lock); } void sctp_put_port(struct sock *sk) { local_bh_disable(); __sctp_put_port(sk); local_bh_enable(); } /* * The system picks an ephemeral port and choose an address set equivalent * to binding with a wildcard address. * One of those addresses will be the primary address for the association. * This automatically enables the multihoming capability of SCTP. */ static int sctp_autobind(struct sock *sk) { union sctp_addr autoaddr; struct sctp_af *af; __be16 port; /* Initialize a local sockaddr structure to INADDR_ANY. 
*/ af = sctp_sk(sk)->pf->af; port = htons(inet_sk(sk)->inet_num); af->inaddr_any(&autoaddr, port); return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); } /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. * * From RFC 2292 * 4.2 The cmsghdr Structure * * * When ancillary data is sent or received, any number of ancillary data * objects can be specified by the msg_control and msg_controllen members of * the msghdr structure, because each object is preceded by * a cmsghdr structure defining the object's length (the cmsg_len member). * Historically Berkeley-derived implementations have passed only one object * at a time, but this API allows multiple objects to be * passed in a single call to sendmsg() or recvmsg(). The following example * shows two ancillary data objects in a control buffer. * * |<--------------------------- msg_controllen -------------------------->| * | | * * |<----- ancillary data object ----->|<----- ancillary data object ----->| * * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| * | | | * * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | * * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | * | | | | | * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| * * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * ^ * | * * msg_control * points here */ static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs) { struct msghdr *my_msg = (struct msghdr *)msg; struct cmsghdr *cmsg; for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; /* Should we parse this header or ignore? */ if (cmsg->cmsg_level != IPPROTO_SCTP) continue; /* Strictly check lengths following example in SCM code. 
*/ switch (cmsg->cmsg_type) { case SCTP_INIT: /* SCTP Socket API Extension * 5.3.1 SCTP Initiation Structure (SCTP_INIT) * * This cmsghdr structure provides information for * initializing new SCTP associations with sendmsg(). * The SCTP_INITMSG socket option uses this same data * structure. This structure is not used for * recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) return -EINVAL; cmsgs->init = CMSG_DATA(cmsg); break; case SCTP_SNDRCV: /* SCTP Socket API Extension * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV) * * This cmsghdr structure specifies SCTP options for * sendmsg() and describes SCTP header information * about a received message through recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) return -EINVAL; cmsgs->srinfo = CMSG_DATA(cmsg); if (cmsgs->srinfo->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; case SCTP_SNDINFO: /* SCTP Socket API Extension * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) * * This cmsghdr structure specifies SCTP options for * sendmsg(). This structure and SCTP_RCVINFO replaces * SCTP_SNDRCV which has been deprecated. * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ --------------------- * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) return -EINVAL; cmsgs->sinfo = CMSG_DATA(cmsg); if (cmsgs->sinfo->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; default: return -EINVAL; } } return 0; } /* * Wait for a packet.. 
* Note: This function is the same function as in core/datagram.c * with a few modifications to make lksctp work. */ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; DEFINE_WAIT(wait); prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* Socket errors? */ error = sock_error(sk); if (error) goto out; if (!skb_queue_empty(&sk->sk_receive_queue)) goto ready; /* Socket shut down? */ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. */ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 
8) */ if (flags & MSG_PEEK) { skb = skb_peek(&sk->sk_receive_queue); if (skb) refcount_inc(&skb->users); } else { skb = __skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk)) { sk_busy_loop(sk, noblock); if (!skb_queue_empty(&sk->sk_receive_queue)) continue; } /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; if (sctp_wspace(asoc) <= 0) return; if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq) { if (waitqueue_active(&wq->wait)) wake_up_interruptible(&wq->wait); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. */ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. 
* * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. * Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc)); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. 
*/ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? * * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. 
I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances. For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 * - Daisy
 */
/* Returns the number of free sndbuf bytes on the socket, clamped at 0. */
static int sctp_writeable(struct sock *sk)
{
	int amt = 0;

	/* May go negative when queued data exceeds sk_sndbuf; clamp below. */
	amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
	if (amt < 0)
		amt = 0;
	return amt;
}

/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
 * returns immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

	/* Increment the association's refcnt.  */
	sctp_association_hold(asoc);

	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		/* Receive shutdown ends the wait without an error. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		if (sctp_state(asoc, ESTABLISHED))
			break;

		/* Let another process have a go.  Since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt.
 */
	sctp_association_put(asoc);

	return err;

do_error:
	/* Distinguish "ran out of INIT retransmits" from active refusal. */
	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
		err = -ETIMEDOUT;
	else
		err = -ECONNREFUSED;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EINPROGRESS;
	goto out;
}

/* Block in accept() until the listening endpoint has at least one
 * association queued, the socket stops listening, a signal arrives, or
 * the timeout expires.  Called with the socket lock held; drops it
 * around the sleep.
 */
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
	struct sctp_endpoint *ep;
	int err = 0;
	DEFINE_WAIT(wait);

	ep = sctp_sk(sk)->ep;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);

		if (list_empty(&ep->asocs)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}

		/* Re-check every exit condition after waking; each sets
		 * err before testing so the loop exits with the right code.
		 */
		err = -EINVAL;
		if (!sctp_sstate(sk, LISTENING))
			break;

		err = 0;
		if (!list_empty(&ep->asocs))
			break;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = -EAGAIN;
		if (!timeo)
			break;
	}

	finish_wait(sk_sleep(sk), &wait);

	return err;
}

/* Linger on close(): sleep until all associations are gone, a signal
 * arrives, or the timeout expires.  Best effort; no return value.
 */
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk_sleep(sk), &wait);
}

/* Charge skb (and, recursively, its fragments) to sk's receive accounting. */
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments.
*/ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_tsflags = sk->sk_tsflags; newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newsk->sk_rxhash = sk->sk_rxhash; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); security_sk_clone(sk, newsk); } static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { int ancestor_size = sizeof(struct inet_sock) + sizeof(struct sctp_sock) - offsetof(struct sctp_sock, auto_asconf_list); if (sk_from->sk_family == PF_INET6) ancestor_size += sizeof(struct ipv6_pinfo); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. 
*/ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, enum sctp_socket_type type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ sctp_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; spin_lock_bh(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock_bh(&head->lock); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 
*/ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) { newsk->sk_state = SCTP_SS_CLOSED; newsk->sk_shutdown |= RCV_SHUTDOWN; } else { newsk->sk_state = SCTP_SS_ESTABLISHED; } release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP. 
*/ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> static void sctp_v6_destroy_sock(struct sock *sk) { sctp_destroy_sock(sk); inet6_destroy_sock(sk); } struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_v6_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
/* (extraction artifact — boundary between net/sctp/socket.c content above and
 * net/ipv6/exthdrs.c content below; original markers preserved:
 * ./CrossVul/dataset_final_sorted/CWE-416/c/bad_2838_0
 * crossvul-cpp_data_bad_5021_5)
 */
/* * Extension Header handling for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Andi Kleen <ak@muc.de> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes: * yoshfuji : ensure not to overrun while parsing * tlv options. * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs(). * YOSHIFUJI Hideaki @USAGI Register inbound extension header * handlers as inet6_protocol{}. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/export.h> #include <net/dst.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #if IS_ENABLED(CONFIG_IPV6_MIP6) #include <net/xfrm.h> #endif #include <linux/uaccess.h> /* * Parsing tlv encoded headers. * * Parsing function "func" returns true, if parsing succeed * and false, if it failed. * It MUST NOT touch skb->h. */ struct tlvtype_proc { int type; bool (*func)(struct sk_buff *skb, int offset); }; /********************* Generic functions *********************/ /* An unknown option is detected, decide what to do */ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) { switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ return true; case 1: /* drop packet */ break; case 3: /* Send ICMP if not a multicast address and drop packet */ /* Actually, it is redundant check. icmp_send will recheck in any case. 
*/ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) break; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); return false; } kfree_skb(skb); return false; } /* Parse tlv encoded option header (hop-by-hop or destination) */ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) { const struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); int len = (skb_transport_header(skb)[1] + 1) << 3; int padlen = 0; if (skb_transport_offset(skb) + len > skb_headlen(skb)) goto bad; off += 2; len -= 2; while (len > 0) { int optlen = nh[off + 1] + 2; int i; switch (nh[off]) { case IPV6_TLV_PAD1: optlen = 1; padlen++; if (padlen > 7) goto bad; break; case IPV6_TLV_PADN: /* RFC 2460 states that the purpose of PadN is * to align the containing header to multiples * of 8. 7 is therefore the highest valid value. * See also RFC 4942, Section 2.1.9.5. */ padlen += optlen; if (padlen > 7) goto bad; /* RFC 4942 recommends receiving hosts to * actively check PadN payload to contain * only zeroes. */ for (i = 2; i < optlen; i++) { if (nh[off + i] != 0) goto bad; } break; default: /* Other TLV code so scan list */ if (optlen > len) goto bad; for (curr = procs; curr->type >= 0; curr++) { if (curr->type == nh[off]) { /* type specific length/alignment checks will be performed in the func(). */ if (curr->func(skb, off) == false) return false; break; } } if (curr->type < 0) { if (ip6_tlvopt_unknown(skb, off) == 0) return false; } padlen = 0; break; } off += optlen; len -= optlen; } if (len == 0) return true; bad: kfree_skb(skb); return false; } /***************************** Destination options header. 
*****************************/ #if IS_ENABLED(CONFIG_IPV6_MIP6) static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct in6_addr tmp_addr; int ret; if (opt->dsthao) { net_dbg_ratelimited("hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; opt->dst1 = 0; hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { net_dbg_ratelimited("hao invalid option length = %d\n", hao->length); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { net_dbg_ratelimited("hao is not an unicast addr: %pI6\n", &hao->addr); goto discard; } ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr, (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS); if (unlikely(ret < 0)) goto discard; if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto discard; /* update all variable using below by copied skbuff */ hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); ipv6h = ipv6_hdr(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; tmp_addr = ipv6h->saddr; ipv6h->saddr = hao->addr; hao->addr = tmp_addr; if (skb->tstamp.tv64 == 0) __net_timestamp(skb); return true; discard: kfree_skb(skb); return false; } #endif static const struct tlvtype_proc tlvprocdestopt_lst[] = { #if IS_ENABLED(CONFIG_IPV6_MIP6) { .type = IPV6_TLV_HAO, .func = ipv6_dest_hao, }, #endif {-1, NULL} }; static int ipv6_destopt_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) __u16 dstbuf; #endif struct dst_entry *dst = skb_dst(skb); if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } opt->lastopt = opt->dst1 = 
skb_network_header_len(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) dstbuf = opt->dst1; #endif if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) opt->nhoff = dstbuf; #else opt->nhoff = opt->dst1; #endif return 1; } IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); return -1; } /******************************** Routing header. ********************************/ /* called with rcu_read_lock() */ static int ipv6_rthdr_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct in6_addr *addr = NULL; struct in6_addr daddr; struct inet6_dev *idev; int n, i; struct ipv6_rt_hdr *hdr; struct rt0_hdr *rthdr; struct net *net = dev_net(skb->dev); int accept_source_route = net->ipv6.devconf_all->accept_source_route; idev = __in6_dev_get(skb->dev); if (idev && accept_source_route > idev->cnf.accept_source_route) accept_source_route = idev->cnf.accept_source_route; if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } looped_back: if (hdr->segments_left == 0) { switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: /* Silently discard type 2 header unless it was * processed by own */ if (!addr) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } opt->lastopt = opt->srcrt = skb_network_header_len(skb); skb->transport_header += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; 
opt->dst1 = 0; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (accept_source_route < 0) goto unknown_rh; /* Silently discard invalid RTH type 2 */ if (hdr->hdrlen != 2 || hdr->segments_left != 1) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } break; #endif default: goto unknown_rh; } /* * This is the routing header forwarding algorithm from * RFC 2460, page 16. */ n = hdr->hdrlen >> 1; if (hdr->segments_left > n) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } /* We are about to mangle packet header. Be careful! Do not damage packets queued somewhere. */ if (skb_cloned(skb)) { /* the copy is a forwarded packet */ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; i = n - --hdr->segments_left; rthdr = (struct rt0_hdr *) hdr; addr = rthdr->addr; addr += i - 1; switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } if (ipv6_addr_is_multicast(addr)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } daddr = *addr; *addr = ipv6_hdr(skb)->daddr; 
ipv6_hdr(skb)->daddr = daddr; skb_dst_drop(skb); ip6_route_input(skb); if (skb_dst(skb)->error) { skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; } if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; goto looped_back; } skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; unknown_rh: IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb_network_header(skb)); return -1; } static const struct inet6_protocol rthdr_protocol = { .handler = ipv6_rthdr_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol destopt_protocol = { .handler = ipv6_destopt_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol nodata_protocol = { .handler = dst_discard, .flags = INET6_PROTO_NOPOLICY, }; int __init ipv6_exthdrs_init(void) { int ret; ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING); if (ret) goto out; ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS); if (ret) goto out_rthdr; ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE); if (ret) goto out_destopt; out: return ret; out_destopt: inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); out_rthdr: inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); goto out; }; void ipv6_exthdrs_exit(void) { inet6_del_protocol(&nodata_protocol, IPPROTO_NONE); inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); } /********************************** Hop-by-hop options. **********************************/ /* * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input(). 
 */
/* Resolve the inet6 device for stats: prefer the routed dst's device,
 * fall back to the receiving device before ip6_route_input() has run.
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

/* Same dst-or-device fallback, but for the network namespace. */
static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}

/* Router Alert as of RFC 2711 */
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	/* RFC 2711: option data is exactly 2 bytes (the alert value). */
	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
		return true;
	}
	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
			    nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}

/* Jumbo payload */
static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	/* RFC 2675: option length must be 4 and the option must be
	 * 4n+2-aligned within the header.
	 */
	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff+1]);
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	/* A jumbogram must exceed the normal 64K payload limit ... */
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return false;
	}
	/* ... and the fixed-header payload_len must be zero (RFC 2675). */
	if (ipv6_hdr(skb)->payload_len) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	/* Advertised length may not exceed what actually arrived. */
	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	/* Trim any trailing padding and fix up the checksum state. */
	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

/* Hop-by-hop TLV handlers; terminated by type -1 (see ip6_parse_tlv). */
static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, }
};

int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 *
skb_network_header(skb) is equal to skb->data, and * skb_network_header_len(skb) is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + ((skb_transport_header(skb)[1] + 1) << 3)))) { kfree_skb(skb); return -1; } opt->flags |= IP6SKB_HOPBYHOP; if (ip6_parse_tlv(tlvprochopopt_lst, skb)) { skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; } return -1; } /* * Creating outbound headers. * * "build" functions work when skb is filled from head to tail (datagram) * "push" functions work when headers are added from tail to head (tcp) * * In both cases we assume, that caller reserved enough room * for headers. */ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p) { struct rt0_hdr *phdr, *ihdr; int hops; ihdr = (struct rt0_hdr *) opt; phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); memcpy(phdr, ihdr, sizeof(struct rt0_hdr)); hops = ihdr->rt_hdr.hdrlen >> 1; if (hops > 1) memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); phdr->addr[hops - 1] = **addr_p; *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt) { struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt)); memcpy(h, opt, ipv6_optlen(opt)); h->nexthdr = *proto; *proto = type; } void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr) { if (opt->srcrt) { ipv6_push_rthdr(skb, proto, opt->srcrt, daddr); /* * IPV6_RTHDRDSTOPTS is ignored * unless IPV6_RTHDR is set (RFC3542). 
 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);

/* Push the options that belong after a fragment header (dst1opt only). */
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}

/*
 * Duplicate a tx-options blob for socket @sk.  The whole blob (the
 * ipv6_txoptions struct plus every extension header it points to) lives
 * in a single allocation of opt->tot_len bytes, so a flat memcpy()
 * followed by rebasing each internal pointer by the old/new delta is
 * sufficient.  Returns NULL when sock_kmalloc() fails.
 *
 * NOTE(review): the copy carries no reference count; callers own its
 * lifetime and must guarantee no other context can still reach the old
 * or new blob when it is freed -- confirm callers' locking, as
 * ipv6_txoptions lifetime has historically been a use-after-free hazard.
 */
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		/* delta used to rebase the embedded header pointers */
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);

/*
 * Helper for ipv6_renew_options(): emit one extension header into the
 * output buffer at *p.  When @inherit is set, the existing kernel-side
 * header @ohdr (if any) is copied; otherwise the user-supplied @newopt
 * (if any) is copied in.  On success *hdr points at the emitted header
 * and *p is advanced by the CMSG-aligned size.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, or -EINVAL when
 * the length encoded inside the user's header exceeds @newoptlen.
 */
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			/* reject a header claiming more than was supplied */
			if (ipv6_optlen(*hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}

/*
 * Build a fresh tx-options blob that inherits every option of @opt
 * except the one of @newtype, which is replaced (or newly added) from
 * the user buffer @newopt/@newoptlen.  Returns the new blob, NULL when
 * the result would be empty, or an ERR_PTR on failure.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	/* size every inherited option; the one being replaced is skipped */
	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}
if (newopt && newoptlen) tot_len += CMSG_ALIGN(newoptlen); if (!tot_len) return NULL; tot_len += sizeof(*opt2); opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC); if (!opt2) return ERR_PTR(-ENOBUFS); memset(opt2, 0, tot_len); opt2->tot_len = tot_len; p = (char *)(opt2 + 1); err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, newtype != IPV6_HOPOPTS, &opt2->hopopt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, newtype != IPV6_RTHDRDSTOPTS, &opt2->dst0opt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, newtype != IPV6_RTHDR, (struct ipv6_opt_hdr **)&opt2->srcrt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen, newtype != IPV6_DSTOPTS, &opt2->dst1opt, &p); if (err) goto out; opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0); opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); return opt2; out: sock_kfree_s(sk, opt2, opt2->tot_len); return ERR_PTR(err); } struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { /* * ignore the dest before srcrt unless srcrt is being included. * --yoshfuji */ if (opt && opt->dst0opt && !opt->srcrt) { if (opt_space != opt) { memcpy(opt_space, opt, sizeof(*opt_space)); opt = opt_space; } opt->opt_nflen -= ipv6_optlen(opt->dst0opt); opt->dst0opt = NULL; } return opt; } EXPORT_SYMBOL_GPL(ipv6_fixup_options); /** * fl6_update_dst - update flowi destination address with info given * by srcrt option, if any. 
* * @fl6: flowi6 for which daddr is to be updated * @opt: struct ipv6_txoptions in which to look for srcrt opt * @orig: copy of original daddr address if modified * * Returns NULL if no txoptions or no srcrt, otherwise returns orig * and initial value of fl6->daddr set in orig */ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig) { if (!opt || !opt->srcrt) return NULL; *orig = fl6->daddr; fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; return orig; } EXPORT_SYMBOL_GPL(fl6_update_dst);
./CrossVul/dataset_final_sorted/CWE-416/c/bad_5021_5
crossvul-cpp_data_bad_3348_6
/*
Copyright (c) 2014. The YARA Authors. All Rights Reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <yara/sizedstr.h>

//
// Lexicographically compare two length-prefixed strings, byte by byte.
// Returns 0 when both strings have identical content and length, a
// negative value when s1 orders before s2, and a positive value when
// s1 orders after s2.  A string that is a proper prefix of the other
// orders first.  Embedded NUL bytes are compared like any other byte.
//
int sized_string_cmp(
    SIZED_STRING* s1,
    SIZED_STRING* s2)
{
  size_t min_len = (s1->length < s2->length) ? s1->length : s2->length;
  size_t i;

  // Compare the common prefix; the first differing byte decides order.
  for (i = 0; i < min_len; i++)
  {
    if (s1->c_string[i] != s2->c_string[i])
      return (s1->c_string[i] < s2->c_string[i]) ? -1 : 1;
  }

  // Common prefix is identical: the shorter string orders first.
  if (s1->length == s2->length)
    return 0;

  return (s1->length < s2->length) ? -1 : 1;
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_3348_6
crossvul-cpp_data_good_819_4
// SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ bool ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. 
*/ bool __read_mostly tracing_selftest_disabled; /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ static DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. 
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. 
*/ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static int __init set_cmdline_ftrace(char *str) { strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = true; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { allocate_snapshot = true; /* We also need the main ring buffer expanded */ ring_buffer_expanded = true; return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 0; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 0; } __setup("trace_clock=", set_trace_boot_clock); static int __init 
set_tracepoint_printk(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; } __setup("tp_printk", set_tracepoint_printk); unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | \ TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; int ret = -ENODEV; mutex_lock(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; ret = 0; break; } } mutex_unlock(&trace_types_lock); return ret; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } void trace_array_put(struct trace_array *this_tr) { mutex_lock(&trace_types_lock); __trace_array_put(this_tr); mutex_unlock(&trace_types_lock); } int call_filter_check_discard(struct trace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && !filter_match_preds(call->filter, rec)) { __trace_event_discard_commit(buffer, event); return 1; } return 0; } void trace_free_pid_list(struct trace_pid_list *pid_list) { vfree(pid_list->pids); 
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
*/ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* Sorry, but we don't support pid_max changing after setting */ if (task->pid >= pid_list->pid_max) return; /* "self" is set for forks, and NULL for exits */ if (self) set_bit(task->pid, pid_list->pids); else clear_bit(task->pid, pid_list->pids); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { unsigned long pid = (unsigned long)v; (*pos)++; /* pid already is +1 of the actual prevous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ if (pid < pid_list->pid_max) return (void *)(pid + 1); return NULL; } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. 
*/ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; loff_t l = 0; pid = find_first_bit(pid_list->pids, pid_list->pid_max); if (pid >= pid_list->pid_max) return NULL; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret = 0; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. 
*/ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); if (!pid_list) return -ENOMEM; pid_list->pid_max = READ_ONCE(pid_max); /* Only truncating will shrink pid_max */ if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) pid_list->pid_max = filtered_pids->pid_max; pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); if (!pid_list->pids) { kfree(pid_list); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ for_each_set_bit(pid, filtered_pids->pids, filtered_pids->pid_max) { set_bit(pid, pid_list->pids); nr_pids++; } } while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0 || !trace_parser_loaded(&parser)) break; read += ret; ubuf += ret; cnt -= ret; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; if (val >= pid_list->pid_max) break; pid = (pid_t)val; set_bit(pid, pid_list->pids); nr_pids++; trace_parser_clear(&parser); ret = 0; } trace_parser_put(&parser); if (ret < 0) { trace_free_pid_list(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_free_pid_list(pid_list); read = ret; pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer, cpu); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.trace_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been disabled * * Shows if the global trace has been enabled or not. It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. 
*/ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewrited * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. 
*/ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned long flags, int pc) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, flags, pc); ent->type 
= type; } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, flags, pc); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->trace_buffer.buffer) ring_buffer_record_on(tr->trace_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. */ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); } else ring_buffer_unlock_commit(buffer, event); } /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. 
*/ int __trace_puts(unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; int alloc; int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, str, size); /* Add a newline if necessary */ if (entry->buf[size - 1] != '\n') { entry->buf[size] = '\n'; entry->buf[size + 1] = '\0'; } else entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return size; } EXPORT_SYMBOL_GPL(__trace_puts); /** * __trace_bputs - write the pointer to a constant string into trace buffer * @ip: The address of the caller * @str: The constant string to write to the buffer to */ int __trace_bputs(unsigned long ip, const char *str) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; entry->str = str; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return 1; } EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT void tracing_snapshot_instance(struct trace_array 
*tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	/* The snapshot swap (update_max_tr) cannot be done from NMI context */
	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

/* Allocate the spare (max/snapshot) buffer for @tr if not already done */
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. instead, resize it because
	 * The max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/* Stubs used when CONFIG_TRACER_SNAPSHOT is not enabled */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

/* Stop recording into @tr's ring buffer (per-instance tracing_off()) */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

/* "trace_buf_size=" boot parameter: per-cpu ring buffer size in bytes */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

/* "tracing_thresh=" boot parameter; stored in nanoseconds (input * 1000) */
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit postions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

/* Table of selectable trace clocks, indexed by tr->clock_id */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A fresh read (*ppos == 0) starts with a cleared parser */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	/* Nothing unread left in the seq buffer */
	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure.
 * (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	/* Record latency stats and the task that caused the new maximum */
	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	if (tr->stop_count)
		return;

	/* Callers must disable interrupts before taking max_lock */
	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	/* The snapshot is taken by swapping buffer pointers, not copying */
	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer,
				   tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/* Block until the iterator's per-cpu ring buffer has data to read */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head	list;
	struct tracer		*type;
};

static LIST_HEAD(postponed_selftests);

/* Queue a tracer's selftest to run later in boot (init_trace_selftests) */
static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

/* Run any selftests that were postponed during early boot */
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer
*t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate names on the trace_types list */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/*allocate a dummy tracer_flags*/
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Fixed-size cache mapping pids to their comm strings, so traces can
 * print task names for pids recorded earlier.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	/* NO_CMDLINE_MAP is UINT_MAX: all 0xff bytes, so memset works here */
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* stop_count is a nesting counter; only resume on last start */
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next slot, round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

/* Look up the saved comm for @pid; caller must hold trace_cmdline_lock */
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}

static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}

/* Returns true when nothing needs to be recorded for this event */
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task  - task to record
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *        - TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev - previous task during sched_switch
 * @next - next task during sched_switch
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *          TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise.
 * This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

/* Fill in the common fields of a trace entry from the current context */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	/* Reference counted: only the first enable allocates */
	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the tigger to use. It's recusive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

/* Render a trace event through printk via the tracepoint_print_iter */
static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	/* Only flip the static key when the value actually changed */
	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

/* Hand each committed event to every registered export callback */
static void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export
 **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

/*
 * Unlink @export from @list.  Returns 0 on success, -1 when @export is
 * not on the list.  Callers serialize via ftrace_export_lock (see
 * register_ftrace_export()/unregister_ftrace_export() below).
 */
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	/* Find the link pointer that points at @export */
	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

/* Add @export; enable the ftrace_exports static branch on first entry */
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

/* Remove @export; disable the static branch once the list is empty */
static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

/*
 * register_ftrace_export - register a sink for function trace events
 * @export: export whose ->write() callback receives each traced event
 *
 * Returns 0 on success, -1 (with a WARN) when no ->write() was supplied.
 */
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

/*
 * unregister_ftrace_export - remove a previously registered export
 * @export: the export to remove
 *
 * Returns 0 on success, -1 when @export was not registered.
 */
int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

/*
 * trace_function - record a TRACE_FN (function entry) event
 * @tr:		trace array to record into
 * @ip:		instruction pointer of the traced function
 * @parent_ip:	instruction pointer of its caller
 * @flags:	irq flags recorded in the event header
 * @pc:		preempt count recorded in the event header
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if
(!call_filter_check_discard(call, entry, buffer, event)) { if (static_branch_unlikely(&ftrace_exports_enabled)) ftrace_exports(event); __buffer_unlock_commit(buffer, event); } } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) trace.skip++; #endif /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. 
*/ barrier(); if (use_stack == 1) { trace.entries = this_cpu_ptr(ftrace_stack.calls); trace.max_entries = FTRACE_STACK_MAX_ENTRIES; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); if (trace.nr_entries > size) size = trace.nr_entries; } else /* From now on, use_stack is a boolean */ use_stack = 0; size *= sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); memset(&entry->caller, 0, size); if (use_stack) memcpy(&entry->caller, trace.entries, trace.nr_entries * sizeof(unsigned long)); else { trace.max_entries = FTRACE_STACK_ENTRIES; trace.entries = entry->caller; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); } entry->size = trace.nr_entries; if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); preempt_enable_notrace(); } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(buffer, flags, skip, pc, regs); } void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { struct ring_buffer *buffer = tr->trace_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(buffer, flags, skip, pc, NULL); return; } /* * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), * but if the above rcu_is_watching() failed, then the NMI * triggered someplace critical, and rcu_irq_enter() should * not be called from NMI. 
 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	/* Nothing to record while tracing is disabled or self-testing */
	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);

/* Per-CPU recursion guard: non-zero while a user stack dump is in flight */
static DEFINE_PER_CPU(int, user_stack_count);

/*
 * ftrace_trace_userstack - record the current task's user-space stack
 * @buffer:	ring buffer to write the TRACE_USER_STACK entry into
 * @flags:	irq flags recorded in the event header
 * @pc:		preempt count recorded in the event header
 *
 * No-op unless the global userstacktrace option is set.  Never runs in
 * NMI context: saving a user stack can fault, which NMIs cannot handle.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
*/ preempt_disable(); if (__this_cpu_read(user_stack_count)) goto out; __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), flags, pc); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = 0; trace.entries = entry->caller; save_stack_trace_user(&trace); if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); out: preempt_enable(); } #ifdef UNUSED static void __trace_userstack(struct trace_array *tr, unsigned long flags) { ftrace_trace_userstack(tr, flags, preempt_count()); } #endif /* UNUSED */ #endif /* CONFIG_STACKTRACE */ /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; char buffer[4][TRACE_BUF_SIZE]; }; static struct trace_buffer_struct *trace_percpu_buffer; /* * Thise allows for lockless recording. If we're nested too deeply, then * this returns NULL. 
*/ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); if (!buffer || buffer->nesting >= 4) return NULL; buffer->nesting++; /* Interrupts must see nesting incremented before we use the buffer */ barrier(); return &buffer->buffer[buffer->nesting][0]; } static void put_trace_buf(void) { /* Don't let the decrement of nesting leak before this */ barrier(); this_cpu_dec(trace_percpu_buffer->nesting); } static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct *buffers; buffers = alloc_percpu(struct trace_buffer_struct); if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; return 0; } static int buffers_allocated; void trace_printk_init_buffers(void) { if (buffers_allocated) return; if (alloc_percpu_trace_buffer()) return; /* trace_printk() is for debug use only. Don't use it in production. */ pr_warn("\n"); pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); pr_warn("** **\n"); pr_warn("** This means that this is a DEBUG kernel and it is **\n"); pr_warn("** unsafe for production use. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your vendor! **\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ tracing_update_buffers(); buffers_allocated = 1; /* * trace_printk_init_buffers() can be called by modules. * If that happens, then we need to start cmdline recording * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. 
*/ if (global_trace.trace_buffer.buffer) tracing_start_cmdline_record(); } void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; char *tbuffer; int len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; int 
len = 0, size, pc; struct print_entry *entry; unsigned long flags; char *tbuffer; if (tracing_disabled || tracing_selftest_running) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } __printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_read(buf_iter, NULL); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, lost_events); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->trace_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. 
*/ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, NULL, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { if (ts >= iter->trace_buffer->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; /* * copy the tracer to avoid using a global lock all around. * iter->trace is a copy of current_trace, the pointer to the * name may be used instead of a strcmp(), as iter->trace->name * will point to the same string as current_trace->name. 
*/ mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) *iter->trace = *tr->current_trace; mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (!iter->snapshot) atomic_inc(&trace_record_taskinfo_disabled); if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. */ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif if (!iter->snapshot) atomic_dec(&trace_record_taskinfo_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries(struct trace_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long count; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. 
*/ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total += count; } else *total += count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries += count; } } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off \n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / delay \n" "# cmd pid ||||| time | caller \n" "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; const char tgid_space[] = " "; const char space[] = " "; print_event_info(buf, m); seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? tgid_space : space); seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); seq_printf(m, "# | | %s | |||| | |\n", tgid ? 
" | " : space); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct trace_buffer *buf = iter->trace_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = "preemption"; name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, #if defined(CONFIG_PREEMPT_NONE) "server", #elif defined(CONFIG_PREEMPT_VOLUNTARY) "desktop", #elif defined(CONFIG_PREEMPT) "preempt", #else "unknown", #endif /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, 
iter->started)) return; if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { if (iter->iter_flags & TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) return event->funcs->trace(iter, sym_flags, event); trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); 
SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
*/ enum print_line_t print_trace_line(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; enum print_line_t ret; if (iter->lost_events) { trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", iter->cpu, iter->lost_events); if (trace_seq_has_overflowed(&iter->seq)) return TRACE_TYPE_PARTIAL_LINE; } if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (iter->ent->type == TRACE_BPUTS && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bputs_msg_only(iter); if (iter->ent->type == TRACE_BPRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bprintk_msg_only(iter); if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_printk_msg_only(iter); if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER_HEX) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER_RAW) return print_raw_fmt(iter); return print_trace_fmt(iter); } void trace_latency_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) print_trace_header(m, iter); if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { if (!(trace_flags & 
TRACE_ITER_VERBOSE)) { if (trace_flags & TRACE_ITER_IRQ_INFO) print_func_help_header_irq(iter->trace_buffer, m, trace_flags); else print_func_help_header(iter->trace_buffer, m, trace_flags); } } } static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" "# MAY BE MISSING FUNCTION EVENTS\n"); } #ifdef CONFIG_TRACER_MAX_TRACE static void show_snapshot_main_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer.\n" "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void show_snapshot_percpu_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer for this cpu.\n"); #else seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" "# Must use main snapshot file to allocate.\n"); #endif seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if (iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct 
seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { print_trace_line(iter); ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. 
*/ mutex_lock(&trace_types_lock); iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) goto fail; *iter->trace = *tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->trace_buffer = &tr->max_buffer; else #endif iter->trace_buffer = &tr->trace_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* stop the trace while dumping if we are not opening "snapshot" */ if (!iter->snapshot) tracing_stop_tr(tr); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); kfree(iter->buffer_iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; filp->private_data = inode->i_private; return 0; } bool 
tracing_is_disabled(void)
{
	/* Report the global "tracing disabled" state to callers as a bool. */
	return (tracing_disabled) ? true: false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the trace_array so it cannot go away while this file is open. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

/*
 * Release for the "trace" file: tears down the iterator built by
 * __tracing_open() (per-cpu ring-buffer readers, cpumask, tracer copy)
 * and drops the trace_array reference taken at open time.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	/* Writes do not use seq_file; only the ref count needs dropping. */
	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	/* Finish every per-cpu reader started in __tracing_open(). */
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	/* Let the tracer clean up any state its ->open() created. */
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* __trace_array_put: caller already holds trace_types_lock. */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

/* Release pairing for tracing_open_generic_tr(): just drop the ref. */
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

/* Release for single_open()-style files that also hold a trace_array ref. */
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

/*
 * Open handler for the "trace" file.  Opening with O_TRUNC erases the
 * buffer contents; opening for read builds the full seq_file iterator.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef
CONFIG_TRACER_MAX_TRACE
		/* Latency tracers expose max_buffer through this file. */
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif
		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

/* seq_file ->next for available_tracers: step to the next usable tracer. */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

/*
 * seq_file ->start for available_tracers.  Takes trace_types_lock,
 * which is held across the whole traversal and dropped in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

/* Emit one tracer name; space-separated, newline after the last one. */
static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

/* Open handler for the "available_tracers" file. */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret
= seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	/* Stash the trace_array so t_start()/t_next() can filter tracers. */
	m = file->private_data;
	m->private = tr;

	return 0;
}

/*
 * Accept and discard writes; lets "echo > trace" succeed (the actual
 * erase happens via O_TRUNC handling in tracing_open()).
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

/* Seeking is only meaningful for readers; writers are pinned to 0. */
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * Read handler for "tracing_cpumask": formats the mask with %*pb.
 * The first snprintf(NULL, 0, ...) only measures the needed length.
 */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	/* Caller's buffer must hold the whole formatted mask. */
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

/*
 * Write handler for "tracing_cpumask": parses a new mask and flips
 * per-cpu recording on/off under tr->max_lock with irqs disabled.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); } } arch_spin_unlock(&tr->max_lock); local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); free_cpumask_var(tracing_cpumask_new); return count; err_unlock: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic_tr, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .release = tracing_release_generic_tr, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; struct trace_array *tr = m->private; u32 tracer_flags; int i; mutex_lock(&trace_types_lock); tracer_flags = tr->current_trace->flags->val; trace_opts = tr->current_trace->flags->opts; for (i = 0; trace_options[i]; i++) { if (tr->trace_flags & (1 << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } mutex_unlock(&trace_types_lock); return 0; } static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { struct tracer *trace = tracer_flags->trace; int ret; ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { struct tracer *trace = tr->current_trace; struct 
tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	/* Linear scan of the current tracer's private option table. */
	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

/*
 * Set or clear one global trace option bit and apply its side effects.
 * Returns 0 on success, -EINVAL if the tracer vetoes the change, or
 * -ENOMEM if TGID recording cannot allocate its map.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		/*
		 * Lazily allocated on first enable and never freed.
		 * NOTE(review): sized for PID_MAX_DEFAULT + 1 entries;
		 * presumably pids above PID_MAX_DEFAULT are rejected by
		 * the trace_find_tgid() lookup path — confirm.
		 */
		if (!tgid_map)
			tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
					   sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

/*
 * Parse one option token (optionally prefixed with "no" to negate) and
 * apply it, trying the global options first, then tracer-specific ones.
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could
be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the 
buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t-change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t-view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by adding a suffix 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t 
stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t -:[<group>/]<event>\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" #else "\t $stack<index>, $stack, $retval, $comm\n" #endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>,\n" "\t <type>\\[<array-size>\\]\n" #ifdef 
CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo 
'!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [if <filter>]\n\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. 
The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. 
The syntax is analagous to\n" "\t the enable_event and disable_event triggers.\n" #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) { int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { if (trace_find_tgid(*ptr)) return ptr; } return NULL; } static void *saved_tgids_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 0; if (!tgid_map) return NULL; v = &tgid_map[0]; while (l <= *pos) { v = saved_tgids_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_tgids_stop(struct seq_file *m, void *v) { } static int saved_tgids_show(struct seq_file *m, void *v) { int pid = (int *)v - tgid_map; seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); return 0; } static const struct seq_operations tracing_saved_tgids_seq_ops = { .start = saved_tgids_start, .stop = saved_tgids_stop, .next = saved_tgids_next, .show = saved_tgids_show, }; static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_tgids_seq_ops); } static const struct file_operations tracing_saved_tgids_fops = { .open = tracing_saved_tgids_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) { unsigned int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; ptr++) { if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) continue; return ptr; } return NULL; } static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 
0; preempt_disable(); arch_spin_lock(&trace_cmdline_lock); v = &savedcmd->map_cmdline_to_pid[0]; while (l <= *pos) { v = saved_cmdlines_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_cmdlines_stop(struct seq_file *m, void *v) { arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } static int saved_cmdlines_show(struct seq_file *m, void *v) { char buf[TASK_COMM_LEN]; unsigned int *pid = v; __trace_find_cmdline(*pid, buf); seq_printf(m, "%d %s\n", *pid, buf); return 0; } static const struct seq_operations tracing_saved_cmdlines_seq_ops = { .start = saved_cmdlines_start, .next = saved_cmdlines_next, .stop = saved_cmdlines_stop, .show = saved_cmdlines_show, }; static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_cmdlines_seq_ops); } static const struct file_operations tracing_saved_cmdlines_fops = { .open = tracing_saved_cmdlines_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static ssize_t tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; arch_spin_lock(&trace_cmdline_lock); r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); arch_spin_unlock(&trace_cmdline_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) { kfree(s->saved_cmdlines); kfree(s->map_cmdline_to_pid); kfree(s); } static int tracing_resize_saved_cmdlines(unsigned int val) { struct saved_cmdlines_buffer *s, *savedcmd_temp; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; if (allocate_cmdlines_buffer(val, s) < 0) { kfree(s); return -ENOMEM; } arch_spin_lock(&trace_cmdline_lock); savedcmd_temp = savedcmd; savedcmd = s; arch_spin_unlock(&trace_cmdline_lock); free_saved_cmdlines_buffer(savedcmd_temp); return 0; } static ssize_t tracing_saved_cmdlines_size_write(struct file *filp, 
const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry or less than PID_MAX_DEFAULT */ if (!val || val > PID_MAX_DEFAULT) return -EINVAL; ret = tracing_resize_saved_cmdlines((unsigned int)val); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations tracing_saved_cmdlines_size_fops = { .open = tracing_open_generic, .read = tracing_saved_cmdlines_size_read, .write = tracing_saved_cmdlines_size_write, }; #ifdef CONFIG_TRACE_EVAL_MAP_FILE static union trace_eval_map_item * update_eval_map(union trace_eval_map_item *ptr) { if (!ptr->map.eval_string) { if (ptr->tail.next) { ptr = ptr->tail.next; /* Set ptr to the next real item (skip head) */ ptr++; } else return NULL; } return ptr; } static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) { union trace_eval_map_item *ptr = v; /* * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. 
/*
 * seq_file ->start for the eval_map file.
 *
 * Takes trace_eval_mutex, which stays held until eval_map_stop() --
 * the seq_file core guarantees stop() is called even when start()
 * returns NULL, so the mutex is always released.
 *
 * The first element of trace_eval_maps is a head item, so the cursor
 * is advanced past it before walking forward to *pos.
 */
static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;	/* skip the head item */

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}
/*
 * Register a module's trace eval (enum/sizeof) maps: update the event
 * subsystem's string mappings, then expose the maps through the
 * eval_map file.  A non-positive @len means there is nothing to do.
 */
static void
trace_insert_eval_map(struct module *mod,
		      struct trace_eval_map **start, int len)
{
	if (len <= 0)
		return;

	trace_event_eval_update(start, len);
	trace_insert_eval_map_file(mod, start, len);
}
/* resize @trace_buf to the per-cpu entry counts recorded in @size_buf */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* Mirror every tracing CPU; stop at the first failure,
		 * leaving earlier CPUs already resized (caller handles
		 * the partial-failure case). */
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			/* record the new size only after the resize succeeded */
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
*/ WARN_ON(1); tracing_disabled = 1; } return ret; } if (cpu == RING_BUFFER_ALL_CPUS) set_buffer_entries(&tr->max_buffer, size); else per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; out: #endif /* CONFIG_TRACER_MAX_TRACE */ if (cpu == RING_BUFFER_ALL_CPUS) set_buffer_entries(&tr->trace_buffer, size); else per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; return ret; } static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { int ret = size; mutex_lock(&trace_types_lock); if (cpu_id != RING_BUFFER_ALL_CPUS) { /* make sure, this cpu is enabled in the mask */ if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { ret = -EINVAL; goto out; } } ret = __tracing_resize_ring_buffer(tr, size, cpu_id); if (ret < 0) ret = -ENOMEM; out: mutex_unlock(&trace_types_lock); return ret; } /** * tracing_update_buffers - used by tracing facility to expand ring buffers * * To save on memory when the tracing is never used on a system with it * configured in. The ring buffers are set to a minimum size. But once * a user starts to use the tracing facility, then they need to grow * to their default size. * * This function is to be called when a tracer is about to be used. */ int tracing_update_buffers(void) { int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, RING_BUFFER_ALL_CPUS); mutex_unlock(&trace_types_lock); return ret; } struct trace_option_dentry; static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer); /* * Used to clear out the tracer before deletion of an instance. * Must have trace_types_lock held. 
/*
 * Switch @tr's current tracer to the one named @buf.
 *
 * Serialized by trace_types_lock.  The sequence is order-sensitive:
 * the old tracer is disabled and reset, current_trace is set to
 * nop_trace *before* any RCU grace period / snapshot teardown, and the
 * new tracer is installed only after its init succeeds.
 *
 * Returns 0 on success; -EINVAL for an unknown or disallowed tracer,
 * -EBUSY while trace_pipe readers hold a reference, or a negative
 * error from buffer/snapshot allocation or the tracer's init().
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	/* Grow the ring buffers to their real size on first use. */
	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	/* Look the requested tracer up by name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;	/* already active: nothing to do */

	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
	/* The new tracer needs a snapshot buffer it doesn't have yet. */
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;	/* nop_trace stays installed */
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; mutex_lock(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) goto out; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) goto out; } ret = cnt; out: mutex_unlock(&trace_types_lock); return ret; } #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); } #endif static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; mutex_lock(&trace_types_lock); /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; __trace_array_put(tr); goto out; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe 
does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->trace_buffer = &tr->trace_buffer; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->current_trace->ref++; out: mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter->trace); kfree(iter); __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); tr->current_trace->ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); mutex_destroy(&iter->mutex); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER_BLOCK) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, filp, poll_table); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. 
/*
 * Block until the trace buffer has data to read (consumer side of
 * trace_pipe).  Must be called with iter->mutex held; the mutex is
 * dropped around the actual wait so other pipe operations can make
 * progress.
 *
 * Returns 1 when data is available, -EAGAIN for O_NONBLOCK readers
 * with an empty buffer, or a negative error propagated from
 * wait_on_pipe().
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
*/ mutex_lock(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) goto out; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. 
/*
 * Fill one page worth of formatted trace output into iter->seq for
 * tracing_splice_read_pipe().  Consumes entries until the page-sized
 * seq buffer overflows, @rem bytes have been produced, or the buffer
 * runs out of entries.
 *
 * Returns the number of bytes still wanted; 0 terminates the caller's
 * page loop.
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			/* undo the partially printed line */
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			/* line exceeds what the caller asked for: drop it */
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			/* ring buffer exhausted */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), trace_seq_used(&iter->seq)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = trace_seq_used(&iter->seq); trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } 
if (buf_size_same) { if (!ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? 
*/ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; const char faulted[] = "<faulted>"; ssize_t written; int size; int len; /* Used in tracing_mark_raw_write() as well */ #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ /* If less than "<faulted>", then make sure we can still add that */ if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (unlikely(!event)) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); if (len) { memcpy(&entry->buf, faulted, FAULTED_SIZE); cnt = FAULTED_SIZE; written = -EFAULT; } else written = cnt; len = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; __buffer_unlock_commit(buffer, event); if (tt) 
event_triggers_post_call(tr->trace_marker_file, tt); if (written > 0) *fpos += written; return written; } /* Limit it for now to 3K (including tag) */ #define RAW_DATA_MAX_SIZE (1024*3) static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct ring_buffer *buffer; struct raw_data_entry *entry; const char faulted[] = "<faulted>"; unsigned long irq_flags; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, faulted, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); if (written > 0) *fpos += written; return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? 
/*
 * Switch the trace clock for @tr to the clock named @clockstr.
 *
 * Looks the name up in the trace_clocks[] table, installs the clock on
 * the main buffer (and the snapshot buffer when it exists), and resets
 * the buffers, since timestamps taken with the old clock may not be
 * comparable with the new one.
 *
 * Returns 0 on success or -EINVAL for an unknown clock name.
 */
int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) { int ret = 0; mutex_lock(&trace_types_lock); if (abs && tr->time_stamp_abs_ref++) goto out; if (!abs) { if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { ret = -EINVAL; goto out; } if (--tr->time_stamp_abs_ref) goto out; } ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); #endif out: mutex_unlock(&trace_types_lock); return ret; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->trace_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, 
cnt, 10, &val); if (ret) return ret; mutex_lock(&trace_types_lock); if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto out; } switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } #endif if (!tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id()); else update_max_tr_single(tr, current, iter->cpu_file); local_irq_enable(); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } out: mutex_unlock(&trace_types_lock); return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int ret; ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if 
(info->iter.trace->use_max_tr) {
		/* Tracer owns the max buffer; raw snapshot reads would race. */
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write
= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
#endif /* CONFIG_TRACER_SNAPSHOT */

/*
 * Open for "trace_pipe_raw": allocates per-open ftrace_buffer_info and
 * pins both the trace array and the current tracer (->ref) so neither
 * can go away while the raw buffer is mapped by a reader.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/* Poll on the raw buffer via the shared trace_poll() helper. */
static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Snapshot reads race with the tracer if it owns the max buffer. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Lazily allocate one spare page to copy ring-buffer pages into. */
	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	/* Partial copy: account only what reached userspace. */
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

/* Drop the tracer/array pins taken in tracing_buffers_open(). */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

/* One ring-buffer page handed to the pipe; freed on last reference. */
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer,
ref->cpu, ref->page);
	kfree(ref);
	buf->private = 0;
}

/*
 * Take a reference on a pipe buffer page.  Refuses (returns false) once
 * the count nears overflow, so userspace cannot wrap the refcount by
 * endlessly duplicating the pipe buffer.
 */
static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (ref->ref > INT_MAX/2)
		return false;

	ref->ref++;
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

/*
 * Zero-copy read of the raw per-cpu buffer: hand whole ring-buffer
 * pages to the pipe, wrapped in buffer_refs.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Only whole, page-aligned chunks can be spliced. */
	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -=
PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		/* Block until the buffer is buffer_percent full, then retry. */
		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

/*
 * "stats" file: dump per-cpu ring buffer statistics (entries, overruns,
 * bytes, timestamps, dropped/read events) through a trace_seq.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt =
ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer, cpu)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *p = filp->private_data; char buf[64]; /* Not too big for a shallow stack */ int r; r = scnprintf(buf, 63, "%ld", *p); buf[r++] = '\n'; return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE 
*/

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Probe callback: take a snapshot every time the traced ip is hit. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

/* Probe callback with a per-ip countdown stored in the func mapper. */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

/* Show one "func:snapshot[:count]" line in set_ftrace_filter. */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

/* Allocate the ip->count mapper on first use and record this ip. */
static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

/* Free one ip entry, or the whole mapper when ip == 0. */
static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr,
struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	/* "!func:snapshot" removes the probe again. */
	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

/* Parent dentry for a trace array's files; NULL means the tracefs top. */
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

/* Lazily create (and cache) the "per_cpu" directory for @tr. */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations
*fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	/* Stash cpu+1 in i_cdev so the open path can recover the cpu. */
	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

/* Create the cpuN directory and its per-cpu trace files. */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

/* Read one tracer-specific option flag as "0\n" or "1\n". */
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

/* Write handler: accepts only 0 or 1 and toggles the tracer option. */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
*/
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

/* Read a core trace flag (trace_flags bit) as "0\n" or "1\n". */
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

/* Write a core trace flag; only 0 or 1 are accepted. */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

/* tracefs_create_file() wrapper that warns on failure. */
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

/* Lazily create (and cache) the "options" directory for @tr. */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}

/* Create one options/ file per flag a tracer declares, tracked in tr->topts. */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

/* Create one options/ file for a core trace_flags bit. */
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;
t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		/* Top-level-only flags are skipped in instances. */
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

/* "tracing_on" read: report whether the ring buffer records events. */
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* "tracing_on" write: start/stop recording and call tracer hooks. */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

/* "buffer_percent" read: current wakeup watermark in percent. */
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* "buffer_percent" write: accept 1..100; 0 is clamped up to 1. */
static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}
static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

/* Allocate one ring buffer plus its per-cpu trace_array_cpu data. */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	/*
	 * NOTE(review): this records the size of tr->trace_buffer even when
	 * @buf is the max buffer — looks intentional (max buffer mirrors the
	 * main one) but worth confirming against set_buffer_entries() users.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

/* Allocate the main buffer and, if configured, the max/snapshot buffer. */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
*/
	allocate_snapshot = false;
#endif
	return 0;
}

/* Free a single trace buffer and its per-cpu data (idempotent). */
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

/* Add option files for every registered tracer to @tr. */
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

/* Create a new trace_array instance directory ("instances/<name>"). */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}
ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;

}

/* Remove an instance directory; refuses while the instance is in use. */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Busy if the array or its current tracer still has open users. */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances",
							 d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

/* Populate a trace array's directory with all its control files. */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);
trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", 0644, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", 0644, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", 0444, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", 0444, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) trace_create_file("tracing_max_latency", 0644, d_tracer, &tr->max_latency, &tracing_max_lat_fops); #endif if (ftrace_create_function_files(tr, d_tracer)) WARN(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the 
	 * debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	/* vfs_submount took its own reference on the fs type */
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

/* Section bounds of the built-in trace eval (enum) maps, set by the linker. */
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

/* Register the built-in (vmlinux) eval/enum maps with the tracer core. */
static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
/* Register a just-loaded module's trace eval maps, if it has any. */
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
*/ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; mutex_lock(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) goto out; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); out: mutex_unlock(&trace_eval_mutex); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return 0; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init int tracer_init_tracefs(void) { struct dentry *d_tracer; trace_access_lock_init(); d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; event_trace_init(); init_tracer_tracefs(&global_trace, d_tracer); ftrace_init_tracefs_toplevel(&global_trace, d_tracer); trace_create_file("tracing_thresh", 0644, d_tracer, &global_trace, &tracing_thresh_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", 0644, d_tracer, NULL, &tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", 0444, d_tracer, NULL, &tracing_saved_tgids_fops); trace_eval_init(); trace_create_eval_file(d_tracer); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); 
#endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif create_trace_instances(d_tracer); update_tracer_options(&global_trace); return 0; } static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); return NOTIFY_OK; } static struct notifier_block trace_panic_notifier = { .notifier_call = trace_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); break; default: break; } return NOTIFY_OK; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. 
	 */
	s->buffer[s->seq.len] = 0;	/* force NUL termination before printing */

	printk(KERN_TRACE "%s", s->buffer);

	/* reset the seq so the buffer can be reused for the next line */
	trace_seq_init(s);
}

/*
 * trace_init_global_iter - initialize @iter to walk the global trace array.
 *
 * Points the iterator at global_trace with all CPUs selected, gives the
 * current tracer a chance to run its open() callback, and sets the
 * annotation/nanosecond output flags based on buffer and clock state.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

/*
 * ftrace_dump - dump the trace buffers to the console.
 * @oops_dump_mode: DUMP_ALL (every CPU), DUMP_ORIG (current CPU only)
 *                  or DUMP_NONE.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
*/ tracing_off(); local_irq_save(flags); printk_nmi_direct_enter(); /* Simulate the iterator */ trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = RING_BUFFER_ALL_CPUS; break; case DUMP_ORIG: iter.cpu_file = raw_smp_processor_id(); break; case DUMP_NONE: goto out_enable; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); iter.cpu_file = RING_BUFFER_ALL_CPUS; } printk(KERN_TRACE "Dumping ftrace buffer:\n"); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read the * the next buffer. This is a bit expensive, but is * not done often. We fill all what we can read, * and then release the locks again. 
*/ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); out_enable: tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); printk_nmi_direct_exit(); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ftrace_dump); int trace_run_command(const char *buf, int (*createfn)(int, char **)) { char **argv; int argc, ret; argc = 0; ret = 0; argv = argv_split(GFP_KERNEL, buf, &argc); if (!argv) return -ENOMEM; if (argc) ret = createfn(argc, argv); argv_free(argv); return ret; } #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char **)) { char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); ret = -EINVAL; goto out; } } done += size; /* Remove comments */ tmp = 
strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = trace_run_command(buf, createfn); if (ret) goto out; buf += size; } while (done < count); } ret = done; out: kfree(kbuf); return ret; } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; /* * Make sure we don't accidently add more trace options * than we have bits for. */ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocates some memory for the ring buffer. We * don't free the buffer if the if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. 
*/ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:preapre", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); WARN_ON(1); goto out_free_savedcmd; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. */ global_trace.current_trace = &nop_trace; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; ftrace_init_global_array_ops(&global_trace); init_trace_flags_index(&global_trace); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); list_add(&global_trace.list, &ftrace_trace_arrays); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_savedcmd: free_saved_cmdlines_buffer(savedcmd); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: 
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

/*
 * First stage of tracing bring-up, called very early in boot.
 * Allocates the trace buffers and, if tracepoint_printk was requested,
 * the iterator used to print tracepoints directly to the console.
 */
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		/* on allocation failure fall back to normal buffered output */
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

/* Second stage of tracing bring-up: register the trace events. */
void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * If no trace_clock= was given on the command line and the local
 * sched_clock is not stable, switch the default tracing clock to
 * "global" so timestamps stay comparable across CPUs.
 */
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       " \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif
./CrossVul/dataset_final_sorted/CWE-416/c/good_819_4
crossvul-cpp_data_bad_4045_0
/* * Apple HTTP Live Streaming demuxer * Copyright (c) 2010 Martin Storsjo * Copyright (c) 2013 Anssi Hannula * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Apple HTTP Live Streaming demuxer * http://tools.ietf.org/html/draft-pantos-http-live-streaming */ #include "libavutil/avstring.h" #include "libavutil/avassert.h" #include "libavutil/intreadwrite.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" #include "libavutil/dict.h" #include "libavutil/time.h" #include "avformat.h" #include "internal.h" #include "avio_internal.h" #include "url.h" #include "id3v2.h" #define INITIAL_BUFFER_SIZE 32768 #define MAX_FIELD_LEN 64 #define MAX_CHARACTERISTICS_LEN 512 #define MPEG_TIME_BASE 90000 #define MPEG_TIME_BASE_Q (AVRational){1, MPEG_TIME_BASE} /* * An apple http stream consists of a playlist with media segment files, * played sequentially. There may be several playlists with the same * video content, in different bandwidth variants, that are played in * parallel (preferably only one bandwidth variant at a time). In this case, * the user supplied the url to a main playlist that only lists the variant * playlists. * * If the main playlist doesn't point at any variants, we still create * one anonymous toplevel variant for this, to maintain the structure. 
*/ enum KeyType { KEY_NONE, KEY_AES_128, KEY_SAMPLE_AES }; struct segment { int64_t duration; int64_t url_offset; int64_t size; char *url; char *key; enum KeyType key_type; uint8_t iv[16]; /* associated Media Initialization Section, treated as a segment */ struct segment *init_section; }; struct rendition; enum PlaylistType { PLS_TYPE_UNSPECIFIED, PLS_TYPE_EVENT, PLS_TYPE_VOD }; /* * Each playlist has its own demuxer. If it currently is active, * it has an open AVIOContext too, and potentially an AVPacket * containing the next packet from this stream. */ struct playlist { char url[MAX_URL_SIZE]; AVIOContext pb; uint8_t* read_buffer; URLContext *input; AVFormatContext *parent; int index; AVFormatContext *ctx; AVPacket pkt; int stream_offset; int finished; enum PlaylistType type; int64_t target_duration; int start_seq_no; int n_segments; struct segment **segments; int needed, cur_needed; int cur_seq_no; int64_t cur_seg_offset; int64_t last_load_time; /* Currently active Media Initialization Section */ struct segment *cur_init_section; uint8_t *init_sec_buf; unsigned int init_sec_buf_size; unsigned int init_sec_data_len; unsigned int init_sec_buf_read_offset; char key_url[MAX_URL_SIZE]; uint8_t key[16]; /* ID3 timestamp handling (elementary audio streams have ID3 timestamps * (and possibly other ID3 tags) in the beginning of each segment) */ int is_id3_timestamped; /* -1: not yet known */ int64_t id3_mpegts_timestamp; /* in mpegts tb */ int64_t id3_offset; /* in stream original tb */ uint8_t* id3_buf; /* temp buffer for id3 parsing */ unsigned int id3_buf_size; AVDictionary *id3_initial; /* data from first id3 tag */ int id3_found; /* ID3 tag found at some point */ int id3_changed; /* ID3 tag data has changed at some point */ ID3v2ExtraMeta *id3_deferred_extra; /* stored here until subdemuxer is opened */ int64_t seek_timestamp; int seek_flags; int seek_stream_index; /* into subdemuxer stream array */ /* Renditions associated with this playlist, if any. 
* Alternative rendition playlists have a single rendition associated * with them, and variant main Media Playlists may have * multiple (playlist-less) renditions associated with them. */ int n_renditions; struct rendition **renditions; /* Media Initialization Sections (EXT-X-MAP) associated with this * playlist, if any. */ int n_init_sections; struct segment **init_sections; }; /* * Renditions are e.g. alternative subtitle or audio streams. * The rendition may either be an external playlist or it may be * contained in the main Media Playlist of the variant (in which case * playlist is NULL). */ struct rendition { enum AVMediaType type; struct playlist *playlist; char group_id[MAX_FIELD_LEN]; char language[MAX_FIELD_LEN]; char name[MAX_FIELD_LEN]; int disposition; }; struct variant { int bandwidth; /* every variant contains at least the main Media Playlist in index 0 */ int n_playlists; struct playlist **playlists; char audio_group[MAX_FIELD_LEN]; char video_group[MAX_FIELD_LEN]; char subtitles_group[MAX_FIELD_LEN]; }; typedef struct HLSContext { AVClass *class; int n_variants; struct variant **variants; int n_playlists; struct playlist **playlists; int n_renditions; struct rendition **renditions; int cur_seq_no; int live_start_index; int first_packet; int64_t first_timestamp; int64_t cur_timestamp; AVIOInterruptCB *interrupt_callback; char *user_agent; ///< holds HTTP user agent set as an AVOption to the HTTP protocol context char *cookies; ///< holds HTTP cookie values set in either the initial response or as an AVOption to the HTTP protocol context char *headers; ///< holds HTTP headers set as an AVOption to the HTTP protocol context AVDictionary *avio_opts; char *allowed_extensions; int max_reload; } HLSContext; static int read_chomp_line(AVIOContext *s, char *buf, int maxlen) { int len = ff_get_line(s, buf, maxlen); while (len > 0 && av_isspace(buf[len - 1])) buf[--len] = '\0'; return len; } static void free_segment_list(struct playlist *pls) { int i; for (i = 
0; i < pls->n_segments; i++) {
        av_freep(&pls->segments[i]->key);
        av_freep(&pls->segments[i]->url);
        av_freep(&pls->segments[i]);
    }
    av_freep(&pls->segments);
    pls->n_segments = 0;
}

/* Free the Media Initialization Sections (EXT-X-MAP) of a playlist. */
static void free_init_section_list(struct playlist *pls)
{
    int i;

    for (i = 0; i < pls->n_init_sections; i++) {
        av_freep(&pls->init_sections[i]->url);
        av_freep(&pls->init_sections[i]);
    }
    av_freep(&pls->init_sections);
    pls->n_init_sections = 0;
}

/*
 * Tear down every playlist owned by the context: segment and init-section
 * lists, ID3 state, the buffered packet, the probe/read buffers, and any
 * open URL handle or subdemuxer.  Also releases the context-wide cookie
 * and user-agent strings.
 */
static void free_playlist_list(HLSContext *c)
{
    int i;

    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        free_segment_list(pls);
        free_init_section_list(pls);
        av_freep(&pls->renditions);
        av_freep(&pls->id3_buf);
        av_dict_free(&pls->id3_initial);
        ff_id3v2_free_extra_meta(&pls->id3_deferred_extra);
        av_freep(&pls->init_sec_buf);
        av_free_packet(&pls->pkt);
        av_freep(&pls->pb.buffer);
        if (pls->input)
            ffurl_close(pls->input);
        if (pls->ctx) {
            /* pb is our embedded pls->pb; detach before closing the demuxer */
            pls->ctx->pb = NULL;
            avformat_close_input(&pls->ctx);
        }
        av_free(pls);
    }
    av_freep(&c->playlists);
    av_freep(&c->cookies);
    av_freep(&c->user_agent);
    c->n_playlists = 0;
}

/* Free all variants; the playlists they reference are owned by the context. */
static void free_variant_list(HLSContext *c)
{
    int i;

    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        av_freep(&var->playlists);
        av_free(var);
    }
    av_freep(&c->variants);
    c->n_variants = 0;
}

/* Free all alternative renditions (audio/video/subtitle groups). */
static void free_rendition_list(HLSContext *c)
{
    int i;

    for (i = 0; i < c->n_renditions; i++)
        av_freep(&c->renditions[i]);
    av_freep(&c->renditions);
    c->n_renditions = 0;
}

/*
 * Used to reset a statically allocated AVPacket to a clean slate,
 * containing no data.
*/ static void reset_packet(AVPacket *pkt) { av_init_packet(pkt); pkt->data = NULL; } static struct playlist *new_playlist(HLSContext *c, const char *url, const char *base) { struct playlist *pls = av_mallocz(sizeof(struct playlist)); if (!pls) return NULL; reset_packet(&pls->pkt); ff_make_absolute_url(pls->url, sizeof(pls->url), base, url); pls->seek_timestamp = AV_NOPTS_VALUE; pls->is_id3_timestamped = -1; pls->id3_mpegts_timestamp = AV_NOPTS_VALUE; dynarray_add(&c->playlists, &c->n_playlists, pls); return pls; } struct variant_info { char bandwidth[20]; /* variant group ids: */ char audio[MAX_FIELD_LEN]; char video[MAX_FIELD_LEN]; char subtitles[MAX_FIELD_LEN]; }; static struct variant *new_variant(HLSContext *c, struct variant_info *info, const char *url, const char *base) { struct variant *var; struct playlist *pls; pls = new_playlist(c, url, base); if (!pls) return NULL; var = av_mallocz(sizeof(struct variant)); if (!var) return NULL; if (info) { var->bandwidth = atoi(info->bandwidth); strcpy(var->audio_group, info->audio); strcpy(var->video_group, info->video); strcpy(var->subtitles_group, info->subtitles); } dynarray_add(&c->variants, &c->n_variants, var); dynarray_add(&var->playlists, &var->n_playlists, pls); return var; } static void handle_variant_args(struct variant_info *info, const char *key, int key_len, char **dest, int *dest_len) { if (!strncmp(key, "BANDWIDTH=", key_len)) { *dest = info->bandwidth; *dest_len = sizeof(info->bandwidth); } else if (!strncmp(key, "AUDIO=", key_len)) { *dest = info->audio; *dest_len = sizeof(info->audio); } else if (!strncmp(key, "VIDEO=", key_len)) { *dest = info->video; *dest_len = sizeof(info->video); } else if (!strncmp(key, "SUBTITLES=", key_len)) { *dest = info->subtitles; *dest_len = sizeof(info->subtitles); } } struct key_info { char uri[MAX_URL_SIZE]; char method[11]; char iv[35]; }; static void handle_key_args(struct key_info *info, const char *key, int key_len, char **dest, int *dest_len) { if (!strncmp(key, 
"METHOD=", key_len)) { *dest = info->method; *dest_len = sizeof(info->method); } else if (!strncmp(key, "URI=", key_len)) { *dest = info->uri; *dest_len = sizeof(info->uri); } else if (!strncmp(key, "IV=", key_len)) { *dest = info->iv; *dest_len = sizeof(info->iv); } } struct init_section_info { char uri[MAX_URL_SIZE]; char byterange[32]; }; static struct segment *new_init_section(struct playlist *pls, struct init_section_info *info, const char *url_base) { struct segment *sec; char *ptr; char tmp_str[MAX_URL_SIZE]; if (!info->uri[0]) return NULL; sec = av_mallocz(sizeof(*sec)); if (!sec) return NULL; ff_make_absolute_url(tmp_str, sizeof(tmp_str), url_base, info->uri); sec->url = av_strdup(tmp_str); if (!sec->url) { av_free(sec); return NULL; } if (info->byterange[0]) { sec->size = atoi(info->byterange); ptr = strchr(info->byterange, '@'); if (ptr) sec->url_offset = atoi(ptr+1); } else { /* the entire file is the init section */ sec->size = -1; } dynarray_add(&pls->init_sections, &pls->n_init_sections, sec); return sec; } static void handle_init_section_args(struct init_section_info *info, const char *key, int key_len, char **dest, int *dest_len) { if (!strncmp(key, "URI=", key_len)) { *dest = info->uri; *dest_len = sizeof(info->uri); } else if (!strncmp(key, "BYTERANGE=", key_len)) { *dest = info->byterange; *dest_len = sizeof(info->byterange); } } struct rendition_info { char type[16]; char uri[MAX_URL_SIZE]; char group_id[MAX_FIELD_LEN]; char language[MAX_FIELD_LEN]; char assoc_language[MAX_FIELD_LEN]; char name[MAX_FIELD_LEN]; char defaultr[4]; char forced[4]; char characteristics[MAX_CHARACTERISTICS_LEN]; }; static struct rendition *new_rendition(HLSContext *c, struct rendition_info *info, const char *url_base) { struct rendition *rend; enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN; char *characteristic; char *chr_ptr; char *saveptr; if (!strcmp(info->type, "AUDIO")) type = AVMEDIA_TYPE_AUDIO; else if (!strcmp(info->type, "VIDEO")) type = AVMEDIA_TYPE_VIDEO; 
else if (!strcmp(info->type, "SUBTITLES")) type = AVMEDIA_TYPE_SUBTITLE; else if (!strcmp(info->type, "CLOSED-CAPTIONS")) /* CLOSED-CAPTIONS is ignored since we do not support CEA-608 CC in * AVC SEI RBSP anyway */ return NULL; if (type == AVMEDIA_TYPE_UNKNOWN) return NULL; /* URI is mandatory for subtitles as per spec */ if (type == AVMEDIA_TYPE_SUBTITLE && !info->uri[0]) return NULL; /* TODO: handle subtitles (each segment has to parsed separately) */ if (type == AVMEDIA_TYPE_SUBTITLE) return NULL; rend = av_mallocz(sizeof(struct rendition)); if (!rend) return NULL; dynarray_add(&c->renditions, &c->n_renditions, rend); rend->type = type; strcpy(rend->group_id, info->group_id); strcpy(rend->language, info->language); strcpy(rend->name, info->name); /* add the playlist if this is an external rendition */ if (info->uri[0]) { rend->playlist = new_playlist(c, info->uri, url_base); if (rend->playlist) dynarray_add(&rend->playlist->renditions, &rend->playlist->n_renditions, rend); } if (info->assoc_language[0]) { int langlen = strlen(rend->language); if (langlen < sizeof(rend->language) - 3) { rend->language[langlen] = ','; strncpy(rend->language + langlen + 1, info->assoc_language, sizeof(rend->language) - langlen - 2); } } if (!strcmp(info->defaultr, "YES")) rend->disposition |= AV_DISPOSITION_DEFAULT; if (!strcmp(info->forced, "YES")) rend->disposition |= AV_DISPOSITION_FORCED; chr_ptr = info->characteristics; while ((characteristic = av_strtok(chr_ptr, ",", &saveptr))) { if (!strcmp(characteristic, "public.accessibility.describes-music-and-sound")) rend->disposition |= AV_DISPOSITION_HEARING_IMPAIRED; else if (!strcmp(characteristic, "public.accessibility.describes-video")) rend->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED; chr_ptr = NULL; } return rend; } static void handle_rendition_args(struct rendition_info *info, const char *key, int key_len, char **dest, int *dest_len) { if (!strncmp(key, "TYPE=", key_len)) { *dest = info->type; *dest_len = 
sizeof(info->type); } else if (!strncmp(key, "URI=", key_len)) { *dest = info->uri; *dest_len = sizeof(info->uri); } else if (!strncmp(key, "GROUP-ID=", key_len)) { *dest = info->group_id; *dest_len = sizeof(info->group_id); } else if (!strncmp(key, "LANGUAGE=", key_len)) { *dest = info->language; *dest_len = sizeof(info->language); } else if (!strncmp(key, "ASSOC-LANGUAGE=", key_len)) { *dest = info->assoc_language; *dest_len = sizeof(info->assoc_language); } else if (!strncmp(key, "NAME=", key_len)) { *dest = info->name; *dest_len = sizeof(info->name); } else if (!strncmp(key, "DEFAULT=", key_len)) { *dest = info->defaultr; *dest_len = sizeof(info->defaultr); } else if (!strncmp(key, "FORCED=", key_len)) { *dest = info->forced; *dest_len = sizeof(info->forced); } else if (!strncmp(key, "CHARACTERISTICS=", key_len)) { *dest = info->characteristics; *dest_len = sizeof(info->characteristics); } /* * ignored: * - AUTOSELECT: client may autoselect based on e.g. system language * - INSTREAM-ID: EIA-608 closed caption number ("CC1".."CC4") */ } /* used by parse_playlist to allocate a new variant+playlist when the * playlist is detected to be a Media Playlist (not Master Playlist) * and we have no parent Master Playlist (parsing of which would have * allocated the variant and playlist already) * *pls == NULL => Master Playlist or parentless Media Playlist * *pls != NULL => parented Media Playlist, playlist+variant allocated */ static int ensure_playlist(HLSContext *c, struct playlist **pls, const char *url) { if (*pls) return 0; if (!new_variant(c, NULL, url, NULL)) return AVERROR(ENOMEM); *pls = c->playlists[c->n_playlists - 1]; return 0; } static int open_in(HLSContext *c, AVIOContext **in, const char *url) { AVDictionary *tmp = NULL; int ret; av_dict_copy(&tmp, c->avio_opts, 0); ret = avio_open2(in, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp); av_dict_free(&tmp); return ret; } static int url_connect(struct playlist *pls, AVDictionary *opts, AVDictionary *opts2) 
{
    AVDictionary *tmp = NULL;
    int ret;

    /* merge both option sets into a scratch dict for the connect call */
    av_dict_copy(&tmp, opts, 0);
    av_dict_copy(&tmp, opts2, 0);

    if ((ret = ffurl_connect(pls->input, &tmp)) < 0) {
        /* connect failed: the URLContext is unusable, drop it */
        ffurl_close(pls->input);
        pls->input = NULL;
    }

    av_dict_free(&tmp);
    return ret;
}

/*
 * Replace *dest with the current value of the AVOption @name on @src.
 * An empty string is treated the same as unset (*dest freed, left NULL).
 */
static void update_options(char **dest, const char *name, void *src)
{
    av_freep(dest);
    av_opt_get(src, name, 0, (uint8_t**)dest);
    if (*dest && !strlen(*dest))
        av_freep(dest);
}

/*
 * Open @url for reading with the context-wide AVIO options plus @opts.
 * Only http(s) and plain file URLs are accepted; local files must also
 * match c->allowed_extensions (or "ALL") — per the error message below,
 * this blocks a playlist from pulling in arbitrary local files.
 */
static int open_url(HLSContext *c, URLContext **uc, const char *url,
                    AVDictionary *opts)
{
    AVDictionary *tmp = NULL;
    int ret;
    const char *proto_name = avio_find_protocol_name(url);

    if (!proto_name)
        return AVERROR_INVALIDDATA;

    // only http(s) & file are allowed
    if (av_strstart(proto_name, "file", NULL)) {
        if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
            av_log(c, AV_LOG_ERROR,
                "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
                "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
                url);
            return AVERROR_INVALIDDATA;
        }
    } else if (av_strstart(proto_name, "http", NULL)) {
        ;
    } else
        return AVERROR_INVALIDDATA;

    /*
     * NOTE(review): appears to reject URLs where the detected protocol
     * name does not literally prefix the URL (e.g. nested "file," style
     * specifications) — confirm against the intended threat model.
     */
    if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
        ;
    else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
        return AVERROR_INVALIDDATA;

    av_dict_copy(&tmp, c->avio_opts, 0);
    av_dict_copy(&tmp, opts, 0);

    ret = ffurl_open(uc, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
    if (ret >= 0) {
        // update cookies on http response with setcookies.
URLContext *u = *uc; update_options(&c->cookies, "cookies", u->priv_data); av_dict_set(&opts, "cookies", c->cookies, 0); } av_dict_free(&tmp); return ret; } static int parse_playlist(HLSContext *c, const char *url, struct playlist *pls, AVIOContext *in) { int ret = 0, is_segment = 0, is_variant = 0; int64_t duration = 0; enum KeyType key_type = KEY_NONE; uint8_t iv[16] = ""; int has_iv = 0; char key[MAX_URL_SIZE] = ""; char line[MAX_URL_SIZE]; const char *ptr; int close_in = 0; int64_t seg_offset = 0; int64_t seg_size = -1; uint8_t *new_url = NULL; struct variant_info variant_info; char tmp_str[MAX_URL_SIZE]; struct segment *cur_init_section = NULL; if (!in) { #if 1 AVDictionary *opts = NULL; close_in = 1; /* Some HLS servers don't like being sent the range header */ av_dict_set(&opts, "seekable", "0", 0); // broker prior HTTP options that should be consistent across requests av_dict_set(&opts, "user-agent", c->user_agent, 0); av_dict_set(&opts, "cookies", c->cookies, 0); av_dict_set(&opts, "headers", c->headers, 0); ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts); av_dict_free(&opts); if (ret < 0) return ret; #else ret = open_in(c, &in, url); if (ret < 0) return ret; close_in = 1; #endif } if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, &new_url) >= 0) url = new_url; read_chomp_line(in, line, sizeof(line)); if (strcmp(line, "#EXTM3U")) { ret = AVERROR_INVALIDDATA; goto fail; } if (pls) { free_segment_list(pls); pls->finished = 0; pls->type = PLS_TYPE_UNSPECIFIED; } while (!avio_feof(in)) { read_chomp_line(in, line, sizeof(line)); if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) { is_variant = 1; memset(&variant_info, 0, sizeof(variant_info)); ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args, &variant_info); } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) { struct key_info info = {{0}}; ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args, &info); key_type = KEY_NONE; has_iv = 0; if 
(!strcmp(info.method, "AES-128")) key_type = KEY_AES_128; if (!strcmp(info.method, "SAMPLE-AES")) key_type = KEY_SAMPLE_AES; if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) { ff_hex_to_data(iv, info.iv + 2); has_iv = 1; } av_strlcpy(key, info.uri, sizeof(key)); } else if (av_strstart(line, "#EXT-X-MEDIA:", &ptr)) { struct rendition_info info = {{0}}; ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_rendition_args, &info); new_rendition(c, &info, url); } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) { ret = ensure_playlist(c, &pls, url); if (ret < 0) goto fail; pls->target_duration = atoi(ptr) * AV_TIME_BASE; } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) { ret = ensure_playlist(c, &pls, url); if (ret < 0) goto fail; pls->start_seq_no = atoi(ptr); } else if (av_strstart(line, "#EXT-X-PLAYLIST-TYPE:", &ptr)) { ret = ensure_playlist(c, &pls, url); if (ret < 0) goto fail; if (!strcmp(ptr, "EVENT")) pls->type = PLS_TYPE_EVENT; else if (!strcmp(ptr, "VOD")) pls->type = PLS_TYPE_VOD; } else if (av_strstart(line, "#EXT-X-MAP:", &ptr)) { struct init_section_info info = {{0}}; ret = ensure_playlist(c, &pls, url); if (ret < 0) goto fail; ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_init_section_args, &info); cur_init_section = new_init_section(pls, &info, url); } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) { if (pls) pls->finished = 1; } else if (av_strstart(line, "#EXTINF:", &ptr)) { is_segment = 1; duration = atof(ptr) * AV_TIME_BASE; } else if (av_strstart(line, "#EXT-X-BYTERANGE:", &ptr)) { seg_size = atoi(ptr); ptr = strchr(ptr, '@'); if (ptr) seg_offset = atoi(ptr+1); } else if (av_strstart(line, "#", NULL)) { continue; } else if (line[0]) { if (is_variant) { if (!new_variant(c, &variant_info, line, url)) { ret = AVERROR(ENOMEM); goto fail; } is_variant = 0; } if (is_segment) { struct segment *seg; if (!pls) { if (!new_variant(c, 0, url, NULL)) { ret = AVERROR(ENOMEM); goto fail; } pls = 
c->playlists[c->n_playlists - 1]; } seg = av_malloc(sizeof(struct segment)); if (!seg) { ret = AVERROR(ENOMEM); goto fail; } seg->duration = duration; seg->key_type = key_type; if (has_iv) { memcpy(seg->iv, iv, sizeof(iv)); } else { int seq = pls->start_seq_no + pls->n_segments; memset(seg->iv, 0, sizeof(seg->iv)); AV_WB32(seg->iv + 12, seq); } if (key_type != KEY_NONE) { ff_make_absolute_url(tmp_str, sizeof(tmp_str), url, key); seg->key = av_strdup(tmp_str); if (!seg->key) { av_free(seg); ret = AVERROR(ENOMEM); goto fail; } } else { seg->key = NULL; } ff_make_absolute_url(tmp_str, sizeof(tmp_str), url, line); seg->url = av_strdup(tmp_str); if (!seg->url) { av_free(seg->key); av_free(seg); ret = AVERROR(ENOMEM); goto fail; } dynarray_add(&pls->segments, &pls->n_segments, seg); is_segment = 0; seg->size = seg_size; if (seg_size >= 0) { seg->url_offset = seg_offset; seg_offset += seg_size; seg_size = -1; } else { seg->url_offset = 0; seg_offset = 0; } seg->init_section = cur_init_section; } } } if (pls) pls->last_load_time = av_gettime_relative(); fail: av_free(new_url); if (close_in) avio_close(in); return ret; } static struct segment *current_segment(struct playlist *pls) { return pls->segments[pls->cur_seq_no - pls->start_seq_no]; } enum ReadFromURLMode { READ_NORMAL, READ_COMPLETE, }; /* read from URLContext, limiting read to current segment */ static int read_from_url(struct playlist *pls, struct segment *seg, uint8_t *buf, int buf_size, enum ReadFromURLMode mode) { int ret; /* limit read if the segment was only a part of a file */ if (seg->size >= 0) buf_size = FFMIN(buf_size, seg->size - pls->cur_seg_offset); if (mode == READ_COMPLETE) ret = ffurl_read_complete(pls->input, buf, buf_size); else ret = ffurl_read(pls->input, buf, buf_size); if (ret > 0) pls->cur_seg_offset += ret; return ret; } /* Parse the raw ID3 data and pass contents to caller */ static void parse_id3(AVFormatContext *s, AVIOContext *pb, AVDictionary **metadata, int64_t *dts, 
ID3v2ExtraMetaAPIC **apic, ID3v2ExtraMeta **extra_meta) { static const char id3_priv_owner_ts[] = "com.apple.streaming.transportStreamTimestamp"; ID3v2ExtraMeta *meta; ff_id3v2_read_dict(pb, metadata, ID3v2_DEFAULT_MAGIC, extra_meta); for (meta = *extra_meta; meta; meta = meta->next) { if (!strcmp(meta->tag, "PRIV")) { ID3v2ExtraMetaPRIV *priv = meta->data; if (priv->datasize == 8 && !strcmp(priv->owner, id3_priv_owner_ts)) { /* 33-bit MPEG timestamp */ int64_t ts = AV_RB64(priv->data); av_log(s, AV_LOG_DEBUG, "HLS ID3 audio timestamp %"PRId64"\n", ts); if ((ts & ~((1ULL << 33) - 1)) == 0) *dts = ts; else av_log(s, AV_LOG_ERROR, "Invalid HLS ID3 audio timestamp %"PRId64"\n", ts); } } else if (!strcmp(meta->tag, "APIC") && apic) *apic = meta->data; } } /* Check if the ID3 metadata contents have changed */ static int id3_has_changed_values(struct playlist *pls, AVDictionary *metadata, ID3v2ExtraMetaAPIC *apic) { AVDictionaryEntry *entry = NULL; AVDictionaryEntry *oldentry; /* check that no keys have changed values */ while ((entry = av_dict_get(metadata, "", entry, AV_DICT_IGNORE_SUFFIX))) { oldentry = av_dict_get(pls->id3_initial, entry->key, NULL, AV_DICT_MATCH_CASE); if (!oldentry || strcmp(oldentry->value, entry->value) != 0) return 1; } /* check if apic appeared */ if (apic && (pls->ctx->nb_streams != 2 || !pls->ctx->streams[1]->attached_pic.data)) return 1; if (apic) { int size = pls->ctx->streams[1]->attached_pic.size; if (size != apic->buf->size - AV_INPUT_BUFFER_PADDING_SIZE) return 1; if (memcmp(apic->buf->data, pls->ctx->streams[1]->attached_pic.data, size) != 0) return 1; } return 0; } /* Parse ID3 data and handle the found data */ static void handle_id3(AVIOContext *pb, struct playlist *pls) { AVDictionary *metadata = NULL; ID3v2ExtraMetaAPIC *apic = NULL; ID3v2ExtraMeta *extra_meta = NULL; int64_t timestamp = AV_NOPTS_VALUE; parse_id3(pls->ctx, pb, &metadata, &timestamp, &apic, &extra_meta); if (timestamp != AV_NOPTS_VALUE) { pls->id3_mpegts_timestamp = 
timestamp; pls->id3_offset = 0; } if (!pls->id3_found) { /* initial ID3 tags */ av_assert0(!pls->id3_deferred_extra); pls->id3_found = 1; /* get picture attachment and set text metadata */ if (pls->ctx->nb_streams) ff_id3v2_parse_apic(pls->ctx, &extra_meta); else /* demuxer not yet opened, defer picture attachment */ pls->id3_deferred_extra = extra_meta; av_dict_copy(&pls->ctx->metadata, metadata, 0); pls->id3_initial = metadata; } else { if (!pls->id3_changed && id3_has_changed_values(pls, metadata, apic)) { avpriv_report_missing_feature(pls->ctx, "Changing ID3 metadata in HLS audio elementary stream"); pls->id3_changed = 1; } av_dict_free(&metadata); } if (!pls->id3_deferred_extra) ff_id3v2_free_extra_meta(&extra_meta); } /* Intercept and handle ID3 tags between URLContext and AVIOContext */ static void intercept_id3(struct playlist *pls, uint8_t *buf, int buf_size, int *len) { /* intercept id3 tags, we do not want to pass them to the raw * demuxer on all segment switches */ int bytes; int id3_buf_pos = 0; int fill_buf = 0; struct segment *seg = current_segment(pls); /* gather all the id3 tags */ while (1) { /* see if we can retrieve enough data for ID3 header */ if (*len < ID3v2_HEADER_SIZE && buf_size >= ID3v2_HEADER_SIZE) { bytes = read_from_url(pls, seg, buf + *len, ID3v2_HEADER_SIZE - *len, READ_COMPLETE); if (bytes > 0) { if (bytes == ID3v2_HEADER_SIZE - *len) /* no EOF yet, so fill the caller buffer again after * we have stripped the ID3 tags */ fill_buf = 1; *len += bytes; } else if (*len <= 0) { /* error/EOF */ *len = bytes; fill_buf = 0; } } if (*len < ID3v2_HEADER_SIZE) break; if (ff_id3v2_match(buf, ID3v2_DEFAULT_MAGIC)) { int64_t maxsize = seg->size >= 0 ? 
seg->size : 1024*1024; int taglen = ff_id3v2_tag_len(buf); int tag_got_bytes = FFMIN(taglen, *len); int remaining = taglen - tag_got_bytes; if (taglen > maxsize) { av_log(pls->ctx, AV_LOG_ERROR, "Too large HLS ID3 tag (%d > %"PRId64" bytes)\n", taglen, maxsize); break; } /* * Copy the id3 tag to our temporary id3 buffer. * We could read a small id3 tag directly without memcpy, but * we would still need to copy the large tags, and handling * both of those cases together with the possibility for multiple * tags would make the handling a bit complex. */ pls->id3_buf = av_fast_realloc(pls->id3_buf, &pls->id3_buf_size, id3_buf_pos + taglen); if (!pls->id3_buf) break; memcpy(pls->id3_buf + id3_buf_pos, buf, tag_got_bytes); id3_buf_pos += tag_got_bytes; /* strip the intercepted bytes */ *len -= tag_got_bytes; memmove(buf, buf + tag_got_bytes, *len); av_log(pls->ctx, AV_LOG_DEBUG, "Stripped %d HLS ID3 bytes\n", tag_got_bytes); if (remaining > 0) { /* read the rest of the tag in */ if (read_from_url(pls, seg, pls->id3_buf + id3_buf_pos, remaining, READ_COMPLETE) != remaining) break; id3_buf_pos += remaining; av_log(pls->ctx, AV_LOG_DEBUG, "Stripped additional %d HLS ID3 bytes\n", remaining); } } else { /* no more ID3 tags */ break; } } /* re-fill buffer for the caller unless EOF */ if (*len >= 0 && (fill_buf || *len == 0)) { bytes = read_from_url(pls, seg, buf + *len, buf_size - *len, READ_NORMAL); /* ignore error if we already had some data */ if (bytes >= 0) *len += bytes; else if (*len == 0) *len = bytes; } if (pls->id3_buf) { /* Now parse all the ID3 tags */ AVIOContext id3ioctx; ffio_init_context(&id3ioctx, pls->id3_buf, id3_buf_pos, 0, NULL, NULL, NULL, NULL); handle_id3(&id3ioctx, pls); } if (pls->is_id3_timestamped == -1) pls->is_id3_timestamped = (pls->id3_mpegts_timestamp != AV_NOPTS_VALUE); } static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg) { AVDictionary *opts = NULL; int ret; // broker prior HTTP options that should be consistent 
across requests av_dict_set(&opts, "user-agent", c->user_agent, 0); av_dict_set(&opts, "cookies", c->cookies, 0); av_dict_set(&opts, "headers", c->headers, 0); av_dict_set(&opts, "seekable", "0", 0); if (seg->size >= 0) { /* try to restrict the HTTP request to the part we want * (if this is in fact a HTTP request) */ av_dict_set_int(&opts, "offset", seg->url_offset, 0); av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0); } av_log(pls->parent, AV_LOG_VERBOSE, "HLS request for url '%s', offset %"PRId64", playlist %d\n", seg->url, seg->url_offset, pls->index); if (seg->key_type == KEY_NONE) { ret = open_url(pls->parent->priv_data, &pls->input, seg->url, opts); } else if (seg->key_type == KEY_AES_128) { // HLSContext *c = var->parent->priv_data; char iv[33], key[33], url[MAX_URL_SIZE]; if (strcmp(seg->key, pls->key_url)) { URLContext *uc; if (open_url(pls->parent->priv_data, &uc, seg->key, opts) == 0) { if (ffurl_read_complete(uc, pls->key, sizeof(pls->key)) != sizeof(pls->key)) { av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n", seg->key); } ffurl_close(uc); } else { av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n", seg->key); } av_strlcpy(pls->key_url, seg->key, sizeof(pls->key_url)); } ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0); ff_data_to_hex(key, pls->key, sizeof(pls->key), 0); iv[32] = key[32] = '\0'; if (strstr(seg->url, "://")) snprintf(url, sizeof(url), "crypto+%s", seg->url); else snprintf(url, sizeof(url), "crypto:%s", seg->url); if ((ret = ffurl_alloc(&pls->input, url, AVIO_FLAG_READ, &pls->parent->interrupt_callback)) < 0) goto cleanup; av_opt_set(pls->input->priv_data, "key", key, 0); av_opt_set(pls->input->priv_data, "iv", iv, 0); if ((ret = url_connect(pls, c->avio_opts, opts)) < 0) { goto cleanup; } ret = 0; } else if (seg->key_type == KEY_SAMPLE_AES) { av_log(pls->parent, AV_LOG_ERROR, "SAMPLE-AES encryption is not supported yet\n"); ret = AVERROR_PATCHWELCOME; } else ret = AVERROR(ENOSYS); /* Seek to the 
requested position. If this was a HTTP request, the offset * should already be where want it to, but this allows e.g. local testing * without a HTTP server. */ if (ret == 0 && seg->key_type == KEY_NONE && seg->url_offset) { int seekret = ffurl_seek(pls->input, seg->url_offset, SEEK_SET); if (seekret < 0) { av_log(pls->parent, AV_LOG_ERROR, "Unable to seek to offset %"PRId64" of HLS segment '%s'\n", seg->url_offset, seg->url); ret = seekret; ffurl_close(pls->input); pls->input = NULL; } } cleanup: av_dict_free(&opts); pls->cur_seg_offset = 0; return ret; } static int update_init_section(struct playlist *pls, struct segment *seg) { static const int max_init_section_size = 1024*1024; HLSContext *c = pls->parent->priv_data; int64_t sec_size; int64_t urlsize; int ret; if (seg->init_section == pls->cur_init_section) return 0; pls->cur_init_section = NULL; if (!seg->init_section) return 0; /* this will clobber playlist URLContext stuff, so this should be * called between segments only */ ret = open_input(c, pls, seg->init_section); if (ret < 0) { av_log(pls->parent, AV_LOG_WARNING, "Failed to open an initialization section in playlist %d\n", pls->index); return ret; } if (seg->init_section->size >= 0) sec_size = seg->init_section->size; else if ((urlsize = ffurl_size(pls->input)) >= 0) sec_size = urlsize; else sec_size = max_init_section_size; av_log(pls->parent, AV_LOG_DEBUG, "Downloading an initialization section of size %"PRId64"\n", sec_size); sec_size = FFMIN(sec_size, max_init_section_size); av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size); ret = read_from_url(pls, seg->init_section, pls->init_sec_buf, pls->init_sec_buf_size, READ_COMPLETE); ffurl_close(pls->input); pls->input = NULL; if (ret < 0) return ret; pls->cur_init_section = seg->init_section; pls->init_sec_data_len = ret; pls->init_sec_buf_read_offset = 0; /* spec says audio elementary streams do not have media initialization * sections, so there should be no ID3 timestamps */ 
pls->is_id3_timestamped = 0; return 0; } static int64_t default_reload_interval(struct playlist *pls) { return pls->n_segments > 0 ? pls->segments[pls->n_segments - 1]->duration : pls->target_duration; } static int read_data(void *opaque, uint8_t *buf, int buf_size) { struct playlist *v = opaque; HLSContext *c = v->parent->priv_data; int ret, i; int just_opened = 0; int reload_count = 0; restart: if (!v->needed) return AVERROR_EOF; if (!v->input) { int64_t reload_interval; struct segment *seg; /* Check that the playlist is still needed before opening a new * segment. */ if (v->ctx && v->ctx->nb_streams && v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) { v->needed = 0; for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams; i++) { if (v->parent->streams[i]->discard < AVDISCARD_ALL) v->needed = 1; } } if (!v->needed) { av_log(v->parent, AV_LOG_INFO, "No longer receiving playlist %d\n", v->index); return AVERROR_EOF; } /* If this is a live stream and the reload interval has elapsed since * the last playlist reload, reload the playlists now. */ reload_interval = default_reload_interval(v); reload: reload_count++; if (reload_count > c->max_reload) return AVERROR_EOF; if (!v->finished && av_gettime_relative() - v->last_load_time >= reload_interval) { if ((ret = parse_playlist(c, v->url, v, NULL)) < 0) { av_log(v->parent, AV_LOG_WARNING, "Failed to reload playlist %d\n", v->index); return ret; } /* If we need to reload the playlist again below (if * there's still no more segments), switch to a reload * interval of half the target duration. 
*/ reload_interval = v->target_duration / 2; } if (v->cur_seq_no < v->start_seq_no) { av_log(NULL, AV_LOG_WARNING, "skipping %d segments ahead, expired from playlists\n", v->start_seq_no - v->cur_seq_no); v->cur_seq_no = v->start_seq_no; } if (v->cur_seq_no >= v->start_seq_no + v->n_segments) { if (v->finished) return AVERROR_EOF; while (av_gettime_relative() - v->last_load_time < reload_interval) { if (ff_check_interrupt(c->interrupt_callback)) return AVERROR_EXIT; av_usleep(100*1000); } /* Enough time has elapsed since the last reload */ goto reload; } seg = current_segment(v); /* load/update Media Initialization Section, if any */ ret = update_init_section(v, seg); if (ret) return ret; ret = open_input(c, v, seg); if (ret < 0) { if (ff_check_interrupt(c->interrupt_callback)) return AVERROR_EXIT; av_log(v->parent, AV_LOG_WARNING, "Failed to open segment of playlist %d\n", v->index); v->cur_seq_no += 1; goto reload; } just_opened = 1; } if (v->init_sec_buf_read_offset < v->init_sec_data_len) { /* Push init section out first before first actual segment */ int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size); memcpy(buf, v->init_sec_buf, copy_size); v->init_sec_buf_read_offset += copy_size; return copy_size; } ret = read_from_url(v, current_segment(v), buf, buf_size, READ_NORMAL); if (ret > 0) { if (just_opened && v->is_id3_timestamped != 0) { /* Intercept ID3 tags here, elementary audio streams are required * to convey timestamps using them in the beginning of each segment. 
*/ intercept_id3(v, buf, buf_size, &ret); } return ret; } ffurl_close(v->input); v->input = NULL; v->cur_seq_no++; c->cur_seq_no = v->cur_seq_no; goto restart; } static int playlist_in_multiple_variants(HLSContext *c, struct playlist *pls) { int variant_count = 0; int i, j; for (i = 0; i < c->n_variants && variant_count < 2; i++) { struct variant *v = c->variants[i]; for (j = 0; j < v->n_playlists; j++) { if (v->playlists[j] == pls) { variant_count++; break; } } } return variant_count >= 2; } static void add_renditions_to_variant(HLSContext *c, struct variant *var, enum AVMediaType type, const char *group_id) { int i; for (i = 0; i < c->n_renditions; i++) { struct rendition *rend = c->renditions[i]; if (rend->type == type && !strcmp(rend->group_id, group_id)) { if (rend->playlist) /* rendition is an external playlist * => add the playlist to the variant */ dynarray_add(&var->playlists, &var->n_playlists, rend->playlist); else /* rendition is part of the variant main Media Playlist * => add the rendition to the main Media Playlist */ dynarray_add(&var->playlists[0]->renditions, &var->playlists[0]->n_renditions, rend); } } } static void add_metadata_from_renditions(AVFormatContext *s, struct playlist *pls, enum AVMediaType type) { int rend_idx = 0; int i; for (i = 0; i < pls->ctx->nb_streams; i++) { AVStream *st = s->streams[pls->stream_offset + i]; if (st->codec->codec_type != type) continue; for (; rend_idx < pls->n_renditions; rend_idx++) { struct rendition *rend = pls->renditions[rend_idx]; if (rend->type != type) continue; if (rend->language[0]) av_dict_set(&st->metadata, "language", rend->language, 0); if (rend->name[0]) av_dict_set(&st->metadata, "comment", rend->name, 0); st->disposition |= rend->disposition; } if (rend_idx >=pls->n_renditions) break; } } /* if timestamp was in valid range: returns 1 and sets seq_no * if not: returns 0 and sets seq_no to closest segment */ static int find_timestamp_in_playlist(HLSContext *c, struct playlist *pls, int64_t 
timestamp, int *seq_no) { int i; int64_t pos = c->first_timestamp == AV_NOPTS_VALUE ? 0 : c->first_timestamp; if (timestamp < pos) { *seq_no = pls->start_seq_no; return 0; } for (i = 0; i < pls->n_segments; i++) { int64_t diff = pos + pls->segments[i]->duration - timestamp; if (diff > 0) { *seq_no = pls->start_seq_no + i; return 1; } pos += pls->segments[i]->duration; } *seq_no = pls->start_seq_no + pls->n_segments - 1; return 0; } static int select_cur_seq_no(HLSContext *c, struct playlist *pls) { int seq_no; if (!pls->finished && !c->first_packet && av_gettime_relative() - pls->last_load_time >= default_reload_interval(pls)) /* reload the playlist since it was suspended */ parse_playlist(c, pls->url, pls, NULL); /* If playback is already in progress (we are just selecting a new * playlist) and this is a complete file, find the matching segment * by counting durations. */ if (pls->finished && c->cur_timestamp != AV_NOPTS_VALUE) { find_timestamp_in_playlist(c, pls, c->cur_timestamp, &seq_no); return seq_no; } if (!pls->finished) { if (!c->first_packet && /* we are doing a segment selection during playback */ c->cur_seq_no >= pls->start_seq_no && c->cur_seq_no < pls->start_seq_no + pls->n_segments) /* While spec 3.4.3 says that we cannot assume anything about the * content at the same sequence number on different playlists, * in practice this seems to work and doing it otherwise would * require us to download a segment to inspect its timestamps. */ return c->cur_seq_no; /* If this is a live stream, start live_start_index segments from the * start or end */ if (c->live_start_index < 0) return pls->start_seq_no + FFMAX(pls->n_segments + c->live_start_index, 0); else return pls->start_seq_no + FFMIN(c->live_start_index, pls->n_segments - 1); } /* Otherwise just start on the first segment. 
*/ return pls->start_seq_no; } static int save_avio_options(AVFormatContext *s) { HLSContext *c = s->priv_data; const char *opts[] = { "headers", "user_agent", "user-agent", "cookies", NULL }, **opt = opts; uint8_t *buf; int ret = 0; while (*opt) { if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) { ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL); if (ret < 0) return ret; } opt++; } return ret; } static int hls_read_header(AVFormatContext *s) { URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque; HLSContext *c = s->priv_data; int ret = 0, i, j, stream_offset = 0; c->interrupt_callback = &s->interrupt_callback; c->first_packet = 1; c->first_timestamp = AV_NOPTS_VALUE; c->cur_timestamp = AV_NOPTS_VALUE; // if the URL context is good, read important options we must broker later if (u && u->prot->priv_data_class) { // get the previous user agent & set back to null if string size is zero update_options(&c->user_agent, "user-agent", u->priv_data); // get the previous cookies & set back to null if string size is zero update_options(&c->cookies, "cookies", u->priv_data); // get the previous headers & set back to null if string size is zero update_options(&c->headers, "headers", u->priv_data); } if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0) goto fail; if ((ret = save_avio_options(s)) < 0) goto fail; /* Some HLS servers don't like being sent the range header */ av_dict_set(&c->avio_opts, "seekable", "0", 0); if (c->n_variants == 0) { av_log(NULL, AV_LOG_WARNING, "Empty playlist\n"); ret = AVERROR_EOF; goto fail; } /* If the playlist only contained playlists (Master Playlist), * parse each individual playlist. 
*/ if (c->n_playlists > 1 || c->playlists[0]->n_segments == 0) { for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if ((ret = parse_playlist(c, pls->url, pls, NULL)) < 0) goto fail; } } if (c->variants[0]->playlists[0]->n_segments == 0) { av_log(NULL, AV_LOG_WARNING, "Empty playlist\n"); ret = AVERROR_EOF; goto fail; } /* If this isn't a live stream, calculate the total duration of the * stream. */ if (c->variants[0]->playlists[0]->finished) { int64_t duration = 0; for (i = 0; i < c->variants[0]->playlists[0]->n_segments; i++) duration += c->variants[0]->playlists[0]->segments[i]->duration; s->duration = duration; } /* Associate renditions with variants */ for (i = 0; i < c->n_variants; i++) { struct variant *var = c->variants[i]; if (var->audio_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_AUDIO, var->audio_group); if (var->video_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_VIDEO, var->video_group); if (var->subtitles_group[0]) add_renditions_to_variant(c, var, AVMEDIA_TYPE_SUBTITLE, var->subtitles_group); } /* Open the demuxer for each playlist */ for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; AVInputFormat *in_fmt = NULL; if (!(pls->ctx = avformat_alloc_context())) { ret = AVERROR(ENOMEM); goto fail; } if (pls->n_segments == 0) continue; pls->index = i; pls->needed = 1; pls->parent = s; pls->cur_seq_no = select_cur_seq_no(c, pls); pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE); if (!pls->read_buffer){ ret = AVERROR(ENOMEM); avformat_free_context(pls->ctx); pls->ctx = NULL; goto fail; } ffio_init_context(&pls->pb, pls->read_buffer, INITIAL_BUFFER_SIZE, 0, pls, read_data, NULL, NULL); pls->pb.seekable = 0; ret = av_probe_input_buffer(&pls->pb, &in_fmt, pls->segments[0]->url, NULL, 0, 0); if (ret < 0) { /* Free the ctx - it isn't initialized properly at this point, * so avformat_close_input shouldn't be called. 
If * avformat_open_input fails below, it frees and zeros the * context, so it doesn't need any special treatment like this. */ av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", pls->segments[0]->url); avformat_free_context(pls->ctx); pls->ctx = NULL; goto fail; } pls->ctx->pb = &pls->pb; pls->stream_offset = stream_offset; if ((ret = ff_copy_whitelists(pls->ctx, s)) < 0) goto fail; ret = avformat_open_input(&pls->ctx, pls->segments[0]->url, in_fmt, NULL); if (ret < 0) goto fail; if (pls->id3_deferred_extra && pls->ctx->nb_streams == 1) { ff_id3v2_parse_apic(pls->ctx, &pls->id3_deferred_extra); avformat_queue_attached_pictures(pls->ctx); ff_id3v2_free_extra_meta(&pls->id3_deferred_extra); pls->id3_deferred_extra = NULL; } pls->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER; ret = avformat_find_stream_info(pls->ctx, NULL); if (ret < 0) goto fail; if (pls->is_id3_timestamped == -1) av_log(s, AV_LOG_WARNING, "No expected HTTP requests have been made\n"); /* Create new AVStreams for each stream in this playlist */ for (j = 0; j < pls->ctx->nb_streams; j++) { AVStream *st = avformat_new_stream(s, NULL); AVStream *ist = pls->ctx->streams[j]; if (!st) { ret = AVERROR(ENOMEM); goto fail; } st->id = i; avcodec_copy_context(st->codec, pls->ctx->streams[j]->codec); if (pls->is_id3_timestamped) /* custom timestamps via id3 */ avpriv_set_pts_info(st, 33, 1, MPEG_TIME_BASE); else avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den); } add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_AUDIO); add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_VIDEO); add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_SUBTITLE); stream_offset += pls->ctx->nb_streams; } /* Create a program for each variant */ for (i = 0; i < c->n_variants; i++) { struct variant *v = c->variants[i]; AVProgram *program; program = av_new_program(s, i); if (!program) goto fail; av_dict_set_int(&program->metadata, "variant_bitrate", v->bandwidth, 0); for (j = 0; j < v->n_playlists; 
j++) { struct playlist *pls = v->playlists[j]; int is_shared = playlist_in_multiple_variants(c, pls); int k; for (k = 0; k < pls->ctx->nb_streams; k++) { struct AVStream *st = s->streams[pls->stream_offset + k]; ff_program_add_stream_index(s, i, pls->stream_offset + k); /* Set variant_bitrate for streams unique to this variant */ if (!is_shared && v->bandwidth) av_dict_set_int(&st->metadata, "variant_bitrate", v->bandwidth, 0); } } } return 0; fail: free_playlist_list(c); free_variant_list(c); free_rendition_list(c); return ret; } static int recheck_discard_flags(AVFormatContext *s, int first) { HLSContext *c = s->priv_data; int i, changed = 0; /* Check if any new streams are needed */ for (i = 0; i < c->n_playlists; i++) c->playlists[i]->cur_needed = 0; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; struct playlist *pls = c->playlists[s->streams[i]->id]; if (st->discard < AVDISCARD_ALL) pls->cur_needed = 1; } for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if (pls->cur_needed && !pls->needed) { pls->needed = 1; changed = 1; pls->cur_seq_no = select_cur_seq_no(c, pls); pls->pb.eof_reached = 0; if (c->cur_timestamp != AV_NOPTS_VALUE) { /* catch up */ pls->seek_timestamp = c->cur_timestamp; pls->seek_flags = AVSEEK_FLAG_ANY; pls->seek_stream_index = -1; } av_log(s, AV_LOG_INFO, "Now receiving playlist %d, segment %d\n", i, pls->cur_seq_no); } else if (first && !pls->cur_needed && pls->needed) { if (pls->input) ffurl_close(pls->input); pls->input = NULL; pls->needed = 0; changed = 1; av_log(s, AV_LOG_INFO, "No longer receiving playlist %d\n", i); } } return changed; } static void fill_timing_for_id3_timestamped_stream(struct playlist *pls) { if (pls->id3_offset >= 0) { pls->pkt.dts = pls->id3_mpegts_timestamp + av_rescale_q(pls->id3_offset, pls->ctx->streams[pls->pkt.stream_index]->time_base, MPEG_TIME_BASE_Q); if (pls->pkt.duration) pls->id3_offset += pls->pkt.duration; else pls->id3_offset = -1; } else { /* there 
have been packets with unknown duration * since the last id3 tag, should not normally happen */ pls->pkt.dts = AV_NOPTS_VALUE; } if (pls->pkt.duration) pls->pkt.duration = av_rescale_q(pls->pkt.duration, pls->ctx->streams[pls->pkt.stream_index]->time_base, MPEG_TIME_BASE_Q); pls->pkt.pts = AV_NOPTS_VALUE; } static AVRational get_timebase(struct playlist *pls) { if (pls->is_id3_timestamped) return MPEG_TIME_BASE_Q; return pls->ctx->streams[pls->pkt.stream_index]->time_base; } static int compare_ts_with_wrapdetect(int64_t ts_a, struct playlist *pls_a, int64_t ts_b, struct playlist *pls_b) { int64_t scaled_ts_a = av_rescale_q(ts_a, get_timebase(pls_a), MPEG_TIME_BASE_Q); int64_t scaled_ts_b = av_rescale_q(ts_b, get_timebase(pls_b), MPEG_TIME_BASE_Q); return av_compare_mod(scaled_ts_a, scaled_ts_b, 1LL << 33); } static int hls_read_packet(AVFormatContext *s, AVPacket *pkt) { HLSContext *c = s->priv_data; int ret, i, minplaylist = -1; recheck_discard_flags(s, c->first_packet); c->first_packet = 0; for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; /* Make sure we've got one buffered packet from each open playlist * stream */ if (pls->needed && !pls->pkt.data) { while (1) { int64_t ts_diff; AVRational tb; ret = av_read_frame(pls->ctx, &pls->pkt); if (ret < 0) { if (!avio_feof(&pls->pb) && ret != AVERROR_EOF) return ret; reset_packet(&pls->pkt); break; } else { /* stream_index check prevents matching picture attachments etc. 
*/ if (pls->is_id3_timestamped && pls->pkt.stream_index == 0) { /* audio elementary streams are id3 timestamped */ fill_timing_for_id3_timestamped_stream(pls); } if (c->first_timestamp == AV_NOPTS_VALUE && pls->pkt.dts != AV_NOPTS_VALUE) c->first_timestamp = av_rescale_q(pls->pkt.dts, get_timebase(pls), AV_TIME_BASE_Q); } if (pls->seek_timestamp == AV_NOPTS_VALUE) break; if (pls->seek_stream_index < 0 || pls->seek_stream_index == pls->pkt.stream_index) { if (pls->pkt.dts == AV_NOPTS_VALUE) { pls->seek_timestamp = AV_NOPTS_VALUE; break; } tb = get_timebase(pls); ts_diff = av_rescale_rnd(pls->pkt.dts, AV_TIME_BASE, tb.den, AV_ROUND_DOWN) - pls->seek_timestamp; if (ts_diff >= 0 && (pls->seek_flags & AVSEEK_FLAG_ANY || pls->pkt.flags & AV_PKT_FLAG_KEY)) { pls->seek_timestamp = AV_NOPTS_VALUE; break; } } av_free_packet(&pls->pkt); reset_packet(&pls->pkt); } } /* Check if this stream has the packet with the lowest dts */ if (pls->pkt.data) { struct playlist *minpls = minplaylist < 0 ? NULL : c->playlists[minplaylist]; if (minplaylist < 0) { minplaylist = i; } else { int64_t dts = pls->pkt.dts; int64_t mindts = minpls->pkt.dts; if (dts == AV_NOPTS_VALUE || (mindts != AV_NOPTS_VALUE && compare_ts_with_wrapdetect(dts, pls, mindts, minpls) < 0)) minplaylist = i; } } } /* If we got a packet, return it */ if (minplaylist >= 0) { struct playlist *pls = c->playlists[minplaylist]; *pkt = pls->pkt; pkt->stream_index += pls->stream_offset; reset_packet(&c->playlists[minplaylist]->pkt); if (pkt->dts != AV_NOPTS_VALUE) c->cur_timestamp = av_rescale_q(pkt->dts, pls->ctx->streams[pls->pkt.stream_index]->time_base, AV_TIME_BASE_Q); return 0; } return AVERROR_EOF; } static int hls_close(AVFormatContext *s) { HLSContext *c = s->priv_data; free_playlist_list(c); free_variant_list(c); free_rendition_list(c); av_dict_free(&c->avio_opts); return 0; } static int hls_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { HLSContext *c = s->priv_data; struct playlist 
*seek_pls = NULL; int i, seq_no; int64_t first_timestamp, seek_timestamp, duration; if ((flags & AVSEEK_FLAG_BYTE) || !(c->variants[0]->playlists[0]->finished || c->variants[0]->playlists[0]->type == PLS_TYPE_EVENT)) return AVERROR(ENOSYS); first_timestamp = c->first_timestamp == AV_NOPTS_VALUE ? 0 : c->first_timestamp; seek_timestamp = av_rescale_rnd(timestamp, AV_TIME_BASE, s->streams[stream_index]->time_base.den, flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP); duration = s->duration == AV_NOPTS_VALUE ? 0 : s->duration; if (0 < duration && duration < seek_timestamp - first_timestamp) return AVERROR(EIO); /* find the playlist with the specified stream */ for (i = 0; i < c->n_playlists; i++) { struct playlist *pls = c->playlists[i]; if (stream_index >= pls->stream_offset && stream_index - pls->stream_offset < pls->ctx->nb_streams) { seek_pls = pls; break; } } /* check if the timestamp is valid for the playlist with the * specified stream index */ if (!seek_pls || !find_timestamp_in_playlist(c, seek_pls, seek_timestamp, &seq_no)) return AVERROR(EIO); /* set segment now so we do not need to search again below */ seek_pls->cur_seq_no = seq_no; seek_pls->seek_stream_index = stream_index - seek_pls->stream_offset; for (i = 0; i < c->n_playlists; i++) { /* Reset reading */ struct playlist *pls = c->playlists[i]; if (pls->input) { ffurl_close(pls->input); pls->input = NULL; } av_free_packet(&pls->pkt); reset_packet(&pls->pkt); pls->pb.eof_reached = 0; /* Clear any buffered data */ pls->pb.buf_end = pls->pb.buf_ptr = pls->pb.buffer; /* Reset the pos, to let the mpegts demuxer know we've seeked. */ pls->pb.pos = 0; /* Flush the packet queue of the subdemuxer. 
*/ ff_read_frame_flush(pls->ctx); pls->seek_timestamp = seek_timestamp; pls->seek_flags = flags; if (pls != seek_pls) { /* set closest segment seq_no for playlists not handled above */ find_timestamp_in_playlist(c, pls, seek_timestamp, &pls->cur_seq_no); /* seek the playlist to the given position without taking * keyframes into account since this playlist does not have the * specified stream where we should look for the keyframes */ pls->seek_stream_index = -1; pls->seek_flags |= AVSEEK_FLAG_ANY; } } c->cur_timestamp = seek_timestamp; return 0; } static int hls_probe(AVProbeData *p) { /* Require #EXTM3U at the start, and either one of the ones below * somewhere for a proper match. */ if (strncmp(p->buf, "#EXTM3U", 7)) return 0; if (strstr(p->buf, "#EXT-X-STREAM-INF:") || strstr(p->buf, "#EXT-X-TARGETDURATION:") || strstr(p->buf, "#EXT-X-MEDIA-SEQUENCE:")) return AVPROBE_SCORE_MAX; return 0; } #define OFFSET(x) offsetof(HLSContext, x) #define FLAGS AV_OPT_FLAG_DECODING_PARAM static const AVOption hls_options[] = { {"live_start_index", "segment index to start live streams at (negative values are from the end)", OFFSET(live_start_index), AV_OPT_TYPE_INT, {.i64 = -3}, INT_MIN, INT_MAX, FLAGS}, {"allowed_extensions", "List of file extensions that hls is allowed to access", OFFSET(allowed_extensions), AV_OPT_TYPE_STRING, {.str = "3gp,aac,avi,flac,mkv,m3u8,m4a,m4s,m4v,mpg,mov,mp2,mp3,mp4,mpeg,mpegts,ogg,ogv,oga,ts,vob,wav"}, INT_MIN, INT_MAX, FLAGS}, {"max_reload", "Maximum number of times a insufficient list is attempted to be reloaded", OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS}, {NULL} }; static const AVClass hls_class = { .class_name = "hls,applehttp", .item_name = av_default_item_name, .option = hls_options, .version = LIBAVUTIL_VERSION_INT, }; AVInputFormat ff_hls_demuxer = { .name = "hls,applehttp", .long_name = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming"), .priv_class = &hls_class, .priv_data_size = sizeof(HLSContext), .read_probe 
= hls_probe, .read_header = hls_read_header, .read_packet = hls_read_packet, .read_close = hls_close, .read_seek = hls_read_seek, };
./CrossVul/dataset_final_sorted/CWE-416/c/bad_4045_0
crossvul-cpp_data_bad_3245_0
/* fe-netjoin.c : irssi

    Copyright (C) 2000 Timo Sirainen

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
*/

#include "module.h"
#include "module-formats.h"
#include "signals.h"
#include "levels.h"
#include "misc.h"
#include "settings.h"

#include "irc-servers.h"
#include "modes.h"
#include "ignore.h"
#include "netsplit.h"

#include "printtext.h"

#define NETJOIN_WAIT_TIME 5 /* how many seconds to wait for the netsplitted JOIN messages to stop */
#define NETJOIN_MAX_WAIT 30 /* how many seconds to wait for nick to join to the rest of the channels she was before the netsplit */

/* One nick that is rejoining after a netsplit. */
typedef struct {
	char *nick;
	GSList *old_channels; /* channels the nick was on before the split */
	GSList *now_channels; /* channels rejoined so far (prefixed with mode char or ' ') */
} NETJOIN_REC;

/* Per-server netjoin bookkeeping. */
typedef struct {
	IRC_SERVER_REC *server;
	time_t last_netjoin;  /* time of the most recent rejoin JOIN */
	GSList *netjoins;     /* list of NETJOIN_REC */
} NETJOIN_SERVER_REC;

/* Scratch record used while aggregating the per-channel join message. */
typedef struct {
	int count;
	GString *nicks;
} TEMP_PRINT_REC;

static int join_tag;
static int netjoin_max_nicks, hide_netsplit_quits;
static int printing_joins;
static GSList *joinservers;

/* Find the NETJOIN_SERVER_REC for @server, or NULL if none exists. */
static NETJOIN_SERVER_REC *netjoin_find_server(IRC_SERVER_REC *server)
{
	GSList *tmp;

	g_return_val_if_fail(server != NULL, NULL);

	for (tmp = joinservers; tmp != NULL; tmp = tmp->next) {
		NETJOIN_SERVER_REC *rec = tmp->data;

		if (rec->server == server)
			return rec;
	}

	return NULL;
}

/* Create a NETJOIN_REC for @nick remembering the channels it was on
   before the split, creating the per-server record if needed. */
static NETJOIN_REC *netjoin_add(IRC_SERVER_REC *server, const char *nick,
				GSList *channels)
{
	NETJOIN_REC *rec;
	NETJOIN_SERVER_REC *srec;

	g_return_val_if_fail(server != NULL, NULL);
	g_return_val_if_fail(nick != NULL, NULL);

	rec = g_new0(NETJOIN_REC, 1);
	rec->nick = g_strdup(nick);
	while (channels != NULL) {
		NETSPLIT_CHAN_REC *channel = channels->data;

		rec->old_channels = g_slist_append(rec->old_channels,
						   g_strdup(channel->name));
		channels = channels->next;
	}

	srec = netjoin_find_server(server);
	if (srec == NULL) {
		srec = g_new0(NETJOIN_SERVER_REC, 1);
		srec->server = server;
		joinservers = g_slist_append(joinservers, srec);
	}

	srec->last_netjoin = time(NULL);
	srec->netjoins = g_slist_append(srec->netjoins, rec);
	return rec;
}

/* Find the pending NETJOIN_REC for @nick on @server, or NULL. */
static NETJOIN_REC *netjoin_find(IRC_SERVER_REC *server, const char *nick)
{
	NETJOIN_SERVER_REC *srec;
	GSList *tmp;

	g_return_val_if_fail(server != NULL, NULL);
	g_return_val_if_fail(nick != NULL, NULL);

	srec = netjoin_find_server(server);
	if (srec == NULL) return NULL;

	for (tmp = srec->netjoins; tmp != NULL; tmp = tmp->next) {
		NETJOIN_REC *rec = tmp->data;

		if (g_ascii_strcasecmp(rec->nick, nick) == 0)
			return rec;
	}

	return NULL;
}

/* Free one NETJOIN_REC and unlink it from its server record. */
static void netjoin_remove(NETJOIN_SERVER_REC *server, NETJOIN_REC *rec)
{
	server->netjoins = g_slist_remove(server->netjoins, rec);

	g_slist_foreach(rec->old_channels, (GFunc) g_free, NULL);
	g_slist_foreach(rec->now_channels, (GFunc) g_free, NULL);
	g_slist_free(rec->old_channels);
	g_slist_free(rec->now_channels);

	g_free(rec->nick);
	g_free(rec);
}

/* Drop all netjoin state for one server. */
static void netjoin_server_remove(NETJOIN_SERVER_REC *server)
{
	joinservers = g_slist_remove(joinservers, server);

	while (server->netjoins != NULL)
		netjoin_remove(server, server->netjoins->data);
	g_free(server);
}

/* g_hash_table_foreach() callback: print the aggregated netjoin message
   for one channel and free the temporary record (and the key). */
static void print_channel_netjoins(char *channel, TEMP_PRINT_REC *rec,
				   NETJOIN_SERVER_REC *server)
{
	if (rec->nicks->len > 0)
		g_string_truncate(rec->nicks, rec->nicks->len-2);

	printformat(server->server, channel, MSGLEVEL_JOINS,
		    rec->count > netjoin_max_nicks ?
		    IRCTXT_NETSPLIT_JOIN_MORE : IRCTXT_NETSPLIT_JOIN,
		    rec->nicks->str, rec->count-netjoin_max_nicks);

	g_string_free(rec->nicks, TRUE);
	g_free(rec);
	g_free(channel);
}

/* Print pending netjoin messages, optionally only for @filter_channel.
   Fully-rejoined nicks (and eventually the server record) are removed. */
static void print_netjoins(NETJOIN_SERVER_REC *server, const char *filter_channel)
{
	TEMP_PRINT_REC *temp;
	GHashTable *channels;
	GSList *tmp, *tmp2, *next, *next2, *old;

	g_return_if_fail(server != NULL);

	printing_joins = TRUE;

	/* save nicks to string, clear now_channels and remove the same
	   channels from old_channels list */
	channels = g_hash_table_new((GHashFunc) g_istr_hash,
				    (GCompareFunc) g_istr_equal);

	for (tmp = server->netjoins; tmp != NULL; tmp = next) {
		NETJOIN_REC *rec = tmp->data;

		next = g_slist_next(tmp);
		for (tmp2 = rec->now_channels; tmp2 != NULL; tmp2 = next2) {
			char *channel = tmp2->data;
			char *realchannel = channel + 1;

			next2 = g_slist_next(tmp2);

			/* Filter the results by channel if asked to do so */
			if (filter_channel != NULL &&
			    strcasecmp(realchannel, filter_channel) != 0)
				continue;

			temp = g_hash_table_lookup(channels, realchannel);
			if (temp == NULL) {
				temp = g_new0(TEMP_PRINT_REC, 1);
				temp->nicks = g_string_new(NULL);
				g_hash_table_insert(channels,
						    g_strdup(realchannel),
						    temp);
			}

			temp->count++;
			if (temp->count <= netjoin_max_nicks) {
				/* first byte of the entry is the nick's mode
				   prefix (or ' ' when none) */
				if (*channel != ' ')
					g_string_append_c(temp->nicks,
							  *channel);
				g_string_append_printf(temp->nicks, "%s, ",
						       rec->nick);
			}

			/* remove the channel from old_channels too */
			old = gslist_find_icase_string(rec->old_channels,
						       realchannel);
			if (old != NULL) {
				void *data = old->data;
				rec->old_channels =
					g_slist_remove(rec->old_channels, data);
				g_free(data);
			}

			/* drop tmp2 from the list */
			rec->now_channels =
				g_slist_delete_link(rec->now_channels, tmp2);
			g_free(channel);
		}

		if (rec->old_channels == NULL)
			netjoin_remove(server, rec);
	}

	g_hash_table_foreach(channels, (GHFunc) print_channel_netjoins,
			     server);
	g_hash_table_destroy(channels);

	if (server->netjoins == NULL)
		netjoin_server_remove(server);

	printing_joins = FALSE;
}

/* something is going to be printed to screen,
   print our current netsplit message before it. */
static void sig_print_starting(TEXT_DEST_REC *dest)
{
	NETJOIN_SERVER_REC *rec;

	if (printing_joins)
		return;

	if (!IS_IRC_SERVER(dest->server))
		return;

	if (!(dest->level & MSGLEVEL_PUBLIC))
		return;

	if (!server_ischannel(dest->server, dest->target))
		return;

	rec = netjoin_find_server(IRC_SERVER(dest->server));
	if (rec != NULL && rec->netjoins != NULL)
		print_netjoins(rec, dest->target);
}

/* Periodic (1s) timer: flush netjoins whose JOIN burst has settled and
   expire ones whose nicks never rejoined all channels. */
static int sig_check_netjoins(void)
{
	GSList *tmp, *next;
	int diff;
	time_t now;

	now = time(NULL);

	/* first print all netjoins which haven't had any new joins
	 * for NETJOIN_WAIT_TIME; this may cause them to be removed
	 * (all users who rejoined, rejoined all channels) */
	for (tmp = joinservers; tmp != NULL; tmp = next) {
		NETJOIN_SERVER_REC *server = tmp->data;

		next = tmp->next;
		diff = now-server->last_netjoin;
		if (diff <= NETJOIN_WAIT_TIME) {
			/* wait for more JOINs */
			continue;
		}

		if (server->netjoins != NULL)
			print_netjoins(server, NULL);
	}

	/* now remove all netjoins which haven't had any new joins
	 * for NETJOIN_MAX_WAIT (user rejoined some but not all channels
	 * after split) */
	for (tmp = joinservers; tmp != NULL; tmp = next) {
		NETJOIN_SERVER_REC *server = tmp->data;

		next = tmp->next;
		diff = now-server->last_netjoin;
		if (diff >= NETJOIN_MAX_WAIT) {
			/* waited long enough, forget about the rest */
			netjoin_server_remove(server);
		}
	}

	if (joinservers == NULL) {
		g_source_remove(join_tag);
		signal_remove("print starting",
			      (SIGNAL_FUNC) sig_print_starting);
		join_tag = -1;
	}
	return 1;
}

/* Hide QUIT messages whose reason looks like a netsplit. */
static void msg_quit(IRC_SERVER_REC *server, const char *nick,
		     const char *address, const char *reason)
{
	if (IS_IRC_SERVER(server) && quitmsg_is_split(reason))
		signal_stop();
}

/* Track JOINs of nicks returning from a netsplit; hide the individual
   JOIN message (it will be aggregated into a netjoin line). */
static void msg_join(IRC_SERVER_REC *server, const char *channel,
		     const char *nick, const char *address)
{
	NETSPLIT_REC *split;
	NETJOIN_REC *netjoin;
	GSList *channels;
	int rejoin = 1;

	if (!IS_IRC_SERVER(server))
		return;

	if (ignore_check(SERVER(server), nick, address,
			 channel, NULL, MSGLEVEL_JOINS))
		return;

	split = netsplit_find(server, nick, address);
	netjoin = netjoin_find(server, nick);
	if (split == NULL && netjoin == NULL)
		return;

	/* if this was not a channel they split from, treat it normally */
	if (netjoin != NULL) {
		if (!gslist_find_icase_string(netjoin->old_channels, channel))
			return;
	} else {
		channels = split->channels;
		while (channels != NULL) {
			NETSPLIT_CHAN_REC *schannel = channels->data;

			if (!strcasecmp(schannel->name, channel))
				break;
			channels = channels->next;
		}
		/* we still need to create a NETJOIN_REC now as the
		 * NETSPLIT_REC will be destroyed */
		if (channels == NULL)
			rejoin = 0;
	}

	if (join_tag == -1) {
		join_tag = g_timeout_add(1000, (GSourceFunc)
					 sig_check_netjoins, NULL);
		signal_add("print starting",
			   (SIGNAL_FUNC) sig_print_starting);
	}

	if (netjoin == NULL)
		netjoin = netjoin_add(server, nick, split->channels);

	if (rejoin) {
		netjoin->now_channels =
			g_slist_append(netjoin->now_channels,
				       g_strconcat(" ", channel, NULL));
		signal_stop();
	}
}

/* Attach a mode prefix (@/+/...) to a nick's rejoined-channel entry.
   Returns FALSE if the nick hasn't rejoined @channel yet. */
static int netjoin_set_nickmode(IRC_SERVER_REC *server, NETJOIN_REC *rec,
				const char *channel, char prefix)
{
	GSList *pos;
	const char *flags;
	char *found_chan = NULL;

	for (pos = rec->now_channels; pos != NULL; pos = pos->next) {
		char *chan = pos->data;
		if (strcasecmp(chan+1, channel) == 0) {
			found_chan = chan;
			break;
		}
	}

	if (found_chan == NULL)
		return FALSE;

	/* keep only the highest-ranking prefix already recorded */
	flags = server->get_nick_flags(SERVER(server));
	while (*flags != '\0') {
		if (found_chan[0] == *flags)
			break;
		if (prefix == *flags) {
			found_chan[0] = prefix;
			break;
		}
		flags++;
	}
	return TRUE;
}

/* parse server mode changes - hide operator status changes and
   show them in the netjoin message instead as @ before the nick */
static void msg_mode(IRC_SERVER_REC *server, const char *channel,
		     const char *sender, const char *addr, const char *data)
{
	NETJOIN_REC *rec;
	char *params, *mode, *nicks;
	char **nicklist, **nick, type, prefix;
	int show;

	g_return_if_fail(data != NULL);

	/* only server-originated channel modes are interesting */
	if (!server_ischannel(SERVER(server), channel) || addr != NULL)
		return;

	params = event_get_params(data, 2 | PARAM_FLAG_GETREST,
				  &mode, &nicks);

	nick = nicklist = g_strsplit(nicks, " ", -1);

	type = '+';
	show = FALSE;
	for (; *mode != '\0'; mode++) {
		if (*mode == '+' || *mode == '-') {
			type = *mode;
			continue;
		}

		if (*nick != NULL && GET_MODE_PREFIX(server, *mode)) {
			/* give/remove ops */
			rec = netjoin_find(server, *nick);
			prefix = GET_MODE_PREFIX(server, *mode);
			if (rec == NULL || type != '+' || prefix == '\0' ||
			    !netjoin_set_nickmode(server, rec,
						  channel, prefix))
				show = TRUE;
			nick++;
		} else {
			if (HAS_MODE_ARG(server, type, *mode) && *nick != NULL)
				nick++;

			show = TRUE;
		}
	}

	if (!show) signal_stop();

	g_strfreev(nicklist);
	g_free(params);
}

static void read_settings(void)
{
	int old_hide;

	old_hide = hide_netsplit_quits;
	hide_netsplit_quits = settings_get_bool("hide_netsplit_quits");
	netjoin_max_nicks = settings_get_int("netjoin_max_nicks");

	if (old_hide && !hide_netsplit_quits) {
		signal_remove("message quit", (SIGNAL_FUNC) msg_quit);
		signal_remove("message join", (SIGNAL_FUNC) msg_join);
		signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode);
	} else if (!old_hide && hide_netsplit_quits) {
		signal_add("message quit", (SIGNAL_FUNC) msg_quit);
		signal_add("message join", (SIGNAL_FUNC) msg_join);
		signal_add("message irc mode", (SIGNAL_FUNC) msg_mode);
	}
}

/* FIX (use-after-free): drop the per-server netjoin record as soon as the
   server disconnects. Previously NETJOIN_SERVER_REC.server kept pointing
   at the freed server record, and the 1-second sig_check_netjoins() timer
   (or a later "print starting" signal) would pass the dangling pointer to
   printformat() when flushing pending netjoins. */
static void sig_server_disconnected(IRC_SERVER_REC *server)
{
	NETJOIN_SERVER_REC *netjoin_server;

	if (!IS_IRC_SERVER(server))
		return;

	netjoin_server = netjoin_find_server(server);
	if (netjoin_server != NULL)
		netjoin_server_remove(netjoin_server);
}

void fe_netjoin_init(void)
{
	settings_add_bool("misc", "hide_netsplit_quits", TRUE);
	settings_add_int("misc", "netjoin_max_nicks", 10);

	join_tag = -1;
	printing_joins = FALSE;

	read_settings();
	signal_add("setup changed", (SIGNAL_FUNC) read_settings);
	signal_add("server disconnected",
		   (SIGNAL_FUNC) sig_server_disconnected);
}

void fe_netjoin_deinit(void)
{
	while (joinservers != NULL)
		netjoin_server_remove(joinservers->data);
	if (join_tag != -1) {
		g_source_remove(join_tag);
		signal_remove("print starting",
			      (SIGNAL_FUNC) sig_print_starting);
	}

	signal_remove("setup changed", (SIGNAL_FUNC) read_settings);
	signal_remove("server disconnected",
		      (SIGNAL_FUNC) sig_server_disconnected);

	signal_remove("message quit", (SIGNAL_FUNC) msg_quit);
	signal_remove("message join", (SIGNAL_FUNC) msg_join);
	signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode);
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_3245_0
crossvul-cpp_data_bad_1373_0
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2005-2012 * * This file is part of GPAC / MPEG2-TS sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_MPEG2TS #include <string.h> #include <gpac/constants.h> #include <gpac/internal/media_dev.h> #include <gpac/download.h> #ifndef GPAC_DISABLE_STREAMING #include <gpac/internal/ietf_dev.h> #endif #ifdef GPAC_CONFIG_LINUX #include <unistd.h> #endif #ifdef GPAC_ENABLE_MPE #include <gpac/dvb_mpe.h> #endif #ifdef GPAC_ENABLE_DSMCC #include <gpac/ait.h> #endif #define DEBUG_TS_PACKET 0 GF_EXPORT const char *gf_m2ts_get_stream_name(u32 streamType) { switch (streamType) { case GF_M2TS_VIDEO_MPEG1: return "MPEG-1 Video"; case GF_M2TS_VIDEO_MPEG2: return "MPEG-2 Video"; case GF_M2TS_AUDIO_MPEG1: return "MPEG-1 Audio"; case GF_M2TS_AUDIO_MPEG2: return "MPEG-2 Audio"; case GF_M2TS_PRIVATE_SECTION: return "Private Section"; case GF_M2TS_PRIVATE_DATA: return "Private Data"; case GF_M2TS_AUDIO_AAC: return "AAC Audio"; case GF_M2TS_VIDEO_MPEG4: return "MPEG-4 Video"; case GF_M2TS_VIDEO_H264: return "MPEG-4/H264 Video"; case GF_M2TS_VIDEO_SVC: return "H264-SVC Video"; case GF_M2TS_VIDEO_HEVC: return "HEVC Video"; case GF_M2TS_VIDEO_SHVC: return "SHVC Video"; case GF_M2TS_VIDEO_SHVC_TEMPORAL: 
return "SHVC Video Temporal Sublayer"; case GF_M2TS_VIDEO_MHVC: return "MHVC Video"; case GF_M2TS_VIDEO_MHVC_TEMPORAL: return "MHVC Video Temporal Sublayer"; case GF_M2TS_AUDIO_AC3: return "Dolby AC3 Audio"; case GF_M2TS_AUDIO_DTS: return "Dolby DTS Audio"; case GF_M2TS_SUBTITLE_DVB: return "DVB Subtitle"; case GF_M2TS_SYSTEMS_MPEG4_PES: return "MPEG-4 SL (PES)"; case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: return "MPEG-4 SL (Section)"; case GF_M2TS_MPE_SECTIONS: return "MPE (Section)"; case GF_M2TS_METADATA_PES: return "Metadata (PES)"; case GF_M2TS_METADATA_ID3_HLS: return "ID3/HLS Metadata (PES)"; default: return "Unknown"; } } static u32 gf_m2ts_reframe_default(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { GF_M2TS_PES_PCK pck; pck.flags = 0; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START; pck.DTS = pes->DTS; pck.PTS = pes->PTS; pck.data = (char *)data; pck.data_len = data_len; pck.stream = pes; ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck); /*we consumed all data*/ return 0; } static u32 gf_m2ts_reframe_reset(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr) { if (pes->pck_data) { gf_free(pes->pck_data); pes->pck_data = NULL; } pes->pck_data_len = pes->pck_alloc_len = 0; if (pes->prev_data) { gf_free(pes->prev_data); pes->prev_data = NULL; } pes->prev_data_len = 0; pes->pes_len = 0; pes->prev_PTS = 0; pes->reframe = NULL; pes->cc = -1; pes->temi_tc_desc_len = 0; return 0; } static void add_text(char **buffer, u32 *size, u32 *pos, char *msg, u32 msg_len) { if (!msg || !buffer) return; if (*pos+msg_len>*size) { *size = *pos+msg_len-*size+256; *buffer = (char *)gf_realloc(*buffer, *size); } strncpy((*buffer)+(*pos), msg, msg_len); *pos += msg_len; } static GF_Err id3_parse_tag(char *data, u32 length, char **output, u32 *output_size, u32 *output_pos) { GF_BitStream *bs; u32 pos; if 
	/* NOTE(review): this reads data[0..2] with no prior length check —
	 * callers must guarantee length >= 3 (TODO confirm at call sites). */
	((data[0] != 'I') || (data[1] != 'D') || (data[2] != '3'))
		return GF_NOT_SUPPORTED;

	bs = gf_bs_new(data, length, GF_BITSTREAM_READ);
	gf_bs_skip_bytes(bs, 3);
	/*u8 major = */gf_bs_read_u8(bs);
	/*u8 minor = */gf_bs_read_u8(bs);
	/*u8 unsync = */gf_bs_read_int(bs, 1);
	/*u8 ext_hdr = */ gf_bs_read_int(bs, 1);
	gf_bs_read_int(bs, 6);
	u32 size = gf_id3_read_size(bs);

	pos = (u32) gf_bs_get_position(bs);
	/* clamp the declared tag size to what is actually in the buffer */
	if (size != length-pos)
		size = length-pos;

	while (size && (gf_bs_available(bs)>=10) ) {
		u32 ftag = gf_bs_read_u32(bs);
		u32 fsize = gf_id3_read_size(bs);
		/*u16 fflags = */gf_bs_read_u16(bs);
		/* NOTE(review): only the 10-byte frame header is deducted here;
		 * fsize itself is never validated against the remaining buffer,
		 * so a hostile fsize makes the add_text() below read past the
		 * end of @data — TODO bound fsize by (length - current pos). */
		size -= 10;

		//TODO, handle more ID3 tags ?
		if (ftag==ID3V2_FRAME_TXXX) {
			/* NOTE(review): this inner `pos` shadows the outer one
			 * declared above — presumably intentional but fragile. */
			u32 pos = (u32) gf_bs_get_position(bs);
			char *text = data+pos;
			add_text(output, output_size, output_pos, text, fsize);
		} else {
			/* NOTE(review): format string has no %s for the
			 * gf_4cc_to_str(ftag) argument — the tag name is
			 * never actually printed. */
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] ID3 tag not handled, patch welcome\n", gf_4cc_to_str(ftag) ) );
		}
		gf_bs_skip_bytes(bs, fsize);
	}
	gf_bs_del(bs);
	return GF_OK;
}

/* Reframer for ID3-in-PES (HLS timed metadata): renders the parsed TXXX
 * text as a cue ("<PTS> --> NEXT\n...") and emits it as one PES packet
 * event. The temporary text buffer is freed before returning. */
static u32 gf_m2ts_reframe_id3_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, Bool same_pts, unsigned char *data, u32 data_len, GF_M2TS_PESHeader *pes_hdr)
{
	char frame_header[256];
	char *output_text = NULL;
	u32 output_len = 0;
	u32 pos = 0;
	GF_M2TS_PES_PCK pck;
	pck.flags = 0;
	if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP;
	if (!same_pts) pck.flags |= GF_M2TS_PES_PCK_AU_START;
	pck.DTS = pes->DTS;
	pck.PTS = pes->PTS;

	/* bounded: LLU formats one u64, far below 256 bytes */
	sprintf(frame_header, LLU" --> NEXT\n", pes->PTS);
	add_text(&output_text, &output_len, &pos, frame_header, (u32)strlen(frame_header));
	id3_parse_tag((char *)data, data_len, &output_text, &output_len, &pos);
	add_text(&output_text, &output_len, &pos, "\n\n", 2);

	pck.data = (char *)output_text;
	pck.data_len = pos;
	pck.stream = pes;
	ts->on_event(ts, GF_M2TS_EVT_PES_PCK, &pck);

	gf_free(output_text);

	/*we consumed all data*/
	return 0;
}

/* Scan for the 0x47 sync byte of an MPEG-2 TS packet (188- or 192-byte
 * packets; the latter sets ts->prefix_present). */
static u32 gf_m2ts_sync(GF_M2TS_Demuxer *ts, char *data, u32 size, Bool simple_check)
{
	u32 i=0;
	/*if first byte is sync assume we're sync*/
	if (simple_check && (data[i]==0x47))
return 0; while (i < size) { if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+188]==0x47)) break; if (i+192 >= size) return size; if ((data[i]==0x47) && (data[i+192]==0x47)) { ts->prefix_present = 1; break; } i++; } if (i) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] re-sync skipped %d bytes\n", i) ); } return i; } GF_EXPORT Bool gf_m2ts_crc32_check(u8 *data, u32 len) { u32 crc = gf_crc_32(data, len); u32 crc_val = GF_4CC((u8) data[len], (u8) data[len+1], (u8) data[len+2], (u8) data[len+3]); return (crc==crc_val) ? GF_TRUE : GF_FALSE; } static GF_M2TS_SectionFilter *gf_m2ts_section_filter_new(gf_m2ts_section_callback process_section_callback, Bool process_individual) { GF_M2TS_SectionFilter *sec; GF_SAFEALLOC(sec, GF_M2TS_SectionFilter); if (!sec) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] gf_m2ts_section_filter_new : OUT OF MEMORY\n")); return NULL; } sec->cc = -1; sec->process_section = process_section_callback; sec->process_individual = process_individual; return sec; } static void gf_m2ts_reset_sections(GF_List *sections) { u32 count; GF_M2TS_Section *section; //GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n")); count = gf_list_count(sections); while (count) { section = gf_list_get(sections, 0); gf_list_rem(sections, 0); if (section->data) gf_free(section->data); gf_free(section); count--; } } static void gf_m2ts_section_filter_reset(GF_M2TS_SectionFilter *sf) { if (sf->section) { gf_free(sf->section); sf->section = NULL; } while (sf->table) { GF_M2TS_Table *t = sf->table; sf->table = t->next; gf_m2ts_reset_sections(t->sections); gf_list_del(t->sections); gf_free(t); } sf->cc = -1; sf->length = sf->received = 0; sf->demux_restarted = 1; } static void gf_m2ts_section_filter_del(GF_M2TS_SectionFilter *sf) { gf_m2ts_section_filter_reset(sf); gf_free(sf); } static void gf_m2ts_metadata_descriptor_del(GF_M2TS_MetadataDescriptor *metad) { if (metad) { if (metad->service_id_record) 
gf_free(metad->service_id_record); if (metad->decoder_config) gf_free(metad->decoder_config); if (metad->decoder_config_id) gf_free(metad->decoder_config_id); gf_free(metad); } } GF_EXPORT void gf_m2ts_es_del(GF_M2TS_ES *es, GF_M2TS_Demuxer *ts) { gf_list_del_item(es->program->streams, es); if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_section_filter_del(ses->sec); #ifdef GPAC_ENABLE_MPE if (es->flags & GF_M2TS_ES_IS_MPE) gf_dvb_mpe_section_del(es); #endif } else if (es->pid!=es->program->pmt_pid) { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if ((pes->flags & GF_M2TS_INHERIT_PCR) && ts->ess[es->program->pcr_pid]==es) ts->ess[es->program->pcr_pid] = NULL; if (pes->pck_data) gf_free(pes->pck_data); if (pes->prev_data) gf_free(pes->prev_data); if (pes->buf) gf_free(pes->buf); if (pes->reassemble_buf) gf_free(pes->reassemble_buf); if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); if (pes->metadata_descriptor) gf_m2ts_metadata_descriptor_del(pes->metadata_descriptor); } if (es->slcfg) gf_free(es->slcfg); gf_free(es); } static void gf_m2ts_reset_sdt(GF_M2TS_Demuxer *ts) { while (gf_list_count(ts->SDTs)) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_last(ts->SDTs); gf_list_rem_last(ts->SDTs); if (sdt->provider) gf_free(sdt->provider); if (sdt->service) gf_free(sdt->service); gf_free(sdt); } } GF_EXPORT GF_M2TS_SDT *gf_m2ts_get_sdt_info(GF_M2TS_Demuxer *ts, u32 program_id) { u32 i; for (i=0; i<gf_list_count(ts->SDTs); i++) { GF_M2TS_SDT *sdt = (GF_M2TS_SDT *)gf_list_get(ts->SDTs, i); if (sdt->service_id==program_id) return sdt; } return NULL; } static void gf_m2ts_section_complete(GF_M2TS_Demuxer *ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses) { //seek mode, only process PAT and PMT if (ts->seek_mode && (sec->section[0] != GF_M2TS_TABLE_ID_PAT) && (sec->section[0] != GF_M2TS_TABLE_ID_PMT)) { /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; 
sec->length = sec->received = 0; return; } if (!sec->process_section) { if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_AIT)) ) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; //ts->on_event(ts, GF_M2TS_EVT_AIT_FOUND, &pck); on_ait_section(ts, GF_M2TS_EVT_AIT_FOUND, &pck); #endif } else if ((ts->on_event && (sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_ENCAPSULATED_DATA || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_STREAM_DESCRIPTION || sec->section[0]==GF_M2TS_TABLE_ID_DSM_CC_PRIVATE)) ) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; on_dsmcc_section(ts,GF_M2TS_EVT_DSMCC_FOUND,&pck); //ts->on_event(ts, GF_M2TS_EVT_DSMCC_FOUND, &pck); #endif } #ifdef GPAC_ENABLE_MPE else if (ts->on_mpe_event && ((ses && (ses->flags & GF_M2TS_EVT_DVB_MPE)) || (sec->section[0]==GF_M2TS_TABLE_ID_INT)) ) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_mpe_event(ts, GF_M2TS_EVT_DVB_MPE, &pck); } #endif else if (ts->on_event) { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } else { Bool has_syntax_indicator; u8 table_id; u16 extended_table_id; u32 status, section_start, i; GF_M2TS_Table *t, *prev_t; unsigned char *data; Bool section_valid = 0; status = 0; /*parse header*/ data = (u8 *)sec->section; /*look for proper table*/ table_id = data[0]; if (ts->on_event) { switch (table_id) { case GF_M2TS_TABLE_ID_PAT: case GF_M2TS_TABLE_ID_SDT_ACTUAL: case GF_M2TS_TABLE_ID_PMT: case GF_M2TS_TABLE_ID_NIT_ACTUAL: case GF_M2TS_TABLE_ID_TDT: case GF_M2TS_TABLE_ID_TOT: { GF_M2TS_SL_PCK pck; pck.data_len = sec->length; pck.data = sec->section; 
pck.stream = (GF_M2TS_ES *)ses; ts->on_event(ts, GF_M2TS_EVT_DVB_GENERAL, &pck); } } } has_syntax_indicator = (data[1] & 0x80) ? 1 : 0; if (has_syntax_indicator) { extended_table_id = (data[3]<<8) | data[4]; } else { extended_table_id = 0; } prev_t = NULL; t = sec->table; while (t) { if ((t->table_id==table_id) && (t->ex_table_id == extended_table_id)) break; prev_t = t; t = t->next; } /*create table*/ if (!t) { GF_SAFEALLOC(t, GF_M2TS_Table); if (!t) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc table %d %d\n", table_id, extended_table_id)); return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Creating table %d %d\n", table_id, extended_table_id)); t->table_id = table_id; t->ex_table_id = extended_table_id; t->last_version_number = 0xFF; t->sections = gf_list_new(); if (prev_t) prev_t->next = t; else sec->table = t; } if (has_syntax_indicator) { if (sec->length < 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section length %d less than CRC \n", sec->length)); } else { /*remove crc32*/ sec->length -= 4; if (gf_m2ts_crc32_check((char *)data, sec->length)) { s32 cur_sec_num; t->version_number = (data[5] >> 1) & 0x1f; if (t->last_section_number && t->section_number && (t->version_number != t->last_version_number)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] table transmission interrupted: previous table (v=%d) %d/%d sections - new table (v=%d) %d/%d sections\n", t->last_version_number, t->section_number, t->last_section_number, t->version_number, data[6] + 1, data[7] + 1) ); gf_m2ts_reset_sections(t->sections); t->section_number = 0; } t->current_next_indicator = (data[5] & 0x1) ? 
1 : 0; /*add one to section numbers to detect if we missed or not the first section in the table*/ cur_sec_num = data[6] + 1; t->last_section_number = data[7] + 1; section_start = 8; /*we missed something*/ if (!sec->process_individual && t->section_number + 1 != cur_sec_num) { /* TODO - Check how to handle sections when the first complete section does not have its sec num 0 */ section_valid = 0; if (t->is_init) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted table (lost section %d)\n", cur_sec_num ? cur_sec_num-1 : 31) ); } } else { section_valid = 1; t->section_number = cur_sec_num; } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted section (CRC32 failed)\n")); } } } else { section_valid = 1; section_start = 3; } /*process section*/ if (section_valid) { GF_M2TS_Section *section; GF_SAFEALLOC(section, GF_M2TS_Section); if (!section) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create section\n")); return; } section->data_size = sec->length - section_start; section->data = (unsigned char*)gf_malloc(sizeof(unsigned char)*section->data_size); memcpy(section->data, sec->section + section_start, sizeof(unsigned char)*section->data_size); gf_list_add(t->sections, section); if (t->section_number == 1) { status |= GF_M2TS_TABLE_START; if (t->last_version_number == t->version_number) { t->is_repeat = 1; } else { t->is_repeat = 0; } /*only update version number in the first section of the table*/ t->last_version_number = t->version_number; } if (t->is_init) { if (t->is_repeat) { status |= GF_M2TS_TABLE_REPEAT; } else { status |= GF_M2TS_TABLE_UPDATE; } } else { status |= GF_M2TS_TABLE_FOUND; } if (t->last_section_number == t->section_number) { u32 table_size; status |= GF_M2TS_TABLE_END; table_size = 0; for (i=0; i<gf_list_count(t->sections); i++) { GF_M2TS_Section *section = gf_list_get(t->sections, i); table_size += section->data_size; } if (t->is_repeat) { if (t->table_size != table_size) { status |= 
GF_M2TS_TABLE_UPDATE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Repeated section found with different sizes (old table %d bytes, new table %d bytes)\n", t->table_size, table_size) ); t->table_size = table_size; } } else { t->table_size = table_size; } t->is_init = 1; /*reset section number*/ t->section_number = 0; t->is_repeat = 0; } if (sec->process_individual) { /*send each section of the table and not the aggregated table*/ if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } else { if (status&GF_M2TS_TABLE_END) { if (sec->process_section) sec->process_section(ts, ses, t->sections, t->table_id, t->ex_table_id, t->version_number, (u8) (t->last_section_number - 1), status); gf_m2ts_reset_sections(t->sections); } } } else { sec->cc = -1; t->section_number = 0; } } /*clean-up (including broken sections)*/ if (sec->section) gf_free(sec->section); sec->section = NULL; sec->length = sec->received = 0; } static Bool gf_m2ts_is_long_section(u8 table_id) { switch (table_id) { case GF_M2TS_TABLE_ID_MPEG4_BIFS: case GF_M2TS_TABLE_ID_MPEG4_OD: case GF_M2TS_TABLE_ID_INT: case GF_M2TS_TABLE_ID_EIT_ACTUAL_PF: case GF_M2TS_TABLE_ID_EIT_OTHER_PF: case GF_M2TS_TABLE_ID_ST: case GF_M2TS_TABLE_ID_SIT: case GF_M2TS_TABLE_ID_DSM_CC_PRIVATE: case GF_M2TS_TABLE_ID_MPE_FEC: case GF_M2TS_TABLE_ID_DSM_CC_DOWNLOAD_DATA_MESSAGE: case GF_M2TS_TABLE_ID_DSM_CC_UN_MESSAGE: return 1; default: if (table_id >= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MIN && table_id <= GF_M2TS_TABLE_ID_EIT_SCHEDULE_MAX) return 1; else return 0; } } static u32 gf_m2ts_get_section_length(char byte0, char byte1, char byte2) { u32 length; if (gf_m2ts_is_long_section(byte0)) { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0xfff ); } else { length = 3 + ( ((((u32)byte1)<<8) | (byte2&0xff)) & 0x3ff ); } return length; } static void gf_m2ts_gather_section(GF_M2TS_Demuxer 
*ts, GF_M2TS_SectionFilter *sec, GF_M2TS_SECTION_ES *ses, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size) { u32 payload_size = data_size; u8 expect_cc = (sec->cc<0) ? hdr->continuity_counter : (sec->cc + 1) & 0xf; Bool disc = (expect_cc == hdr->continuity_counter) ? 0 : 1; sec->cc = expect_cc; /*may happen if hdr->adaptation_field=2 no payload in TS packet*/ if (!data_size) return; if (hdr->payload_start) { u32 ptr_field; ptr_field = data[0]; if (ptr_field+1>data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid section start (@ptr_field=%d, @data_size=%d)\n", ptr_field, data_size) ); return; } /*end of previous section*/ if (!sec->length && sec->received) { /* the length of the section could not be determined from the previous TS packet because we had only 1 or 2 bytes */ if (sec->received == 1) sec->length = gf_m2ts_get_section_length(sec->section[0], data[1], data[2]); else /* (sec->received == 2) */ sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], data[1]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); } if (sec->length && sec->received + ptr_field >= sec->length) { u32 len = sec->length - sec->received; memcpy(sec->section + sec->received, data+1, sizeof(char)*len); sec->received += len; if (ptr_field > len) GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid pointer field (@ptr_field=%d, @remaining=%d)\n", ptr_field, len) ); gf_m2ts_section_complete(ts, sec, ses); } data += ptr_field+1; data_size -= ptr_field+1; payload_size -= ptr_field+1; aggregated_section: if (sec->section) gf_free(sec->section); sec->length = sec->received = 0; sec->section = (char*)gf_malloc(sizeof(char)*data_size); memcpy(sec->section, data, sizeof(char)*data_size); sec->received = data_size; } else if (disc) { if (sec->section) gf_free(sec->section); sec->section = NULL; sec->received = sec->length = 0; return; } else if (!sec->section) { return; } else { if (sec->length && 
sec->received+data_size > sec->length) data_size = sec->length - sec->received; if (sec->length) { memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } else { sec->section = (char*)gf_realloc(sec->section, sizeof(char)*(sec->received+data_size)); memcpy(sec->section + sec->received, data, sizeof(char)*data_size); } sec->received += data_size; } /*alloc final buffer*/ if (!sec->length && (sec->received >= 3)) { sec->length = gf_m2ts_get_section_length(sec->section[0], sec->section[1], sec->section[2]); sec->section = (char*)gf_realloc(sec->section, sizeof(char)*sec->length); if (sec->received > sec->length) { data_size -= sec->received - sec->length; sec->received = sec->length; } } if (!sec->length || sec->received < sec->length) return; /*OK done*/ gf_m2ts_section_complete(ts, sec, ses); if (payload_size > data_size) { data += data_size; /* detect padding after previous section */ if (data[0] != 0xFF) { data_size = payload_size - data_size; payload_size = data_size; goto aggregated_section; } } } static void gf_m2ts_process_sdt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 pos, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SDT_REPEAT, NULL); return; } if (table_id != GF_M2TS_TABLE_ID_SDT_ACTUAL) { return; } gf_m2ts_reset_sdt(ts); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] SDT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; //orig_net_id = (data[0] << 8) | data[1]; pos = 3; while (pos < data_size) { GF_M2TS_SDT *sdt; u32 descs_size, d_pos, 
ulen; GF_SAFEALLOC(sdt, GF_M2TS_SDT); if (!sdt) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to create SDT\n")); return; } gf_list_add(ts->SDTs, sdt); sdt->service_id = (data[pos]<<8) + data[pos+1]; sdt->EIT_schedule = (data[pos+2] & 0x2) ? 1 : 0; sdt->EIT_present_following = (data[pos+2] & 0x1); sdt->running_status = (data[pos+3]>>5) & 0x7; sdt->free_CA_mode = (data[pos+3]>>4) & 0x1; descs_size = ((data[pos+3]&0xf)<<8) | data[pos+4]; pos += 5; d_pos = 0; while (d_pos < descs_size) { u8 d_tag = data[pos+d_pos]; u8 d_len = data[pos+d_pos+1]; switch (d_tag) { case GF_M2TS_DVB_SERVICE_DESCRIPTOR: if (sdt->provider) gf_free(sdt->provider); sdt->provider = NULL; if (sdt->service) gf_free(sdt->service); sdt->service = NULL; d_pos+=2; sdt->service_type = data[pos+d_pos]; ulen = data[pos+d_pos+1]; d_pos += 2; sdt->provider = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->provider, data+pos+d_pos, sizeof(char)*ulen); sdt->provider[ulen] = 0; d_pos += ulen; ulen = data[pos+d_pos]; d_pos += 1; sdt->service = (char*)gf_malloc(sizeof(char)*(ulen+1)); memcpy(sdt->service, data+pos+d_pos, sizeof(char)*ulen); sdt->service[ulen] = 0; d_pos += ulen; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) not supported\n", d_tag)); d_pos += d_len; if (d_len == 0) d_pos = descs_size; break; } } pos += descs_size; } evt_type = GF_M2TS_EVT_SDT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_mpeg4section(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_SL_PCK sl_pck; u32 nb_sections, i; GF_M2TS_Section *section; /*skip if already received*/ if (status & GF_M2TS_TABLE_REPEAT) if (!(es->flags & GF_M2TS_ES_SEND_REPEATED_SECTIONS)) return; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Sections for PID %d\n", es->pid) ); /*send all sections (eg SL-packets)*/ nb_sections = 
gf_list_count(sections); for (i=0; i<nb_sections; i++) { section = (GF_M2TS_Section *)gf_list_get(sections, i); sl_pck.data = (char *)section->data; sl_pck.data_len = section->data_size; sl_pck.stream = (GF_M2TS_ES *)es; sl_pck.version_number = version_number; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } } static void gf_m2ts_process_nit(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *nit_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] NIT table processing (not yet implemented)")); } static void gf_m2ts_process_tdt_tot(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *tdt_tot_es, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { unsigned char *data; u32 data_size, nb_sections; u32 date, yp, mp, k; GF_M2TS_Section *section; GF_M2TS_TDT_TOT *time_table; const char *table_name; /*wait for the last section */ if ( !(status & GF_M2TS_TABLE_END) ) return; switch (table_id) { case GF_M2TS_TABLE_ID_TDT: table_name = "TDT"; break; case GF_M2TS_TABLE_ID_TOT: table_name = "TOT"; break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Unimplemented table_id %u for PID %u\n", table_id, GF_M2TS_PID_TDT_TOT_ST)); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] %s on multiple sections not supported\n", table_name)); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; /*TOT only contains 40 bits of UTC_time; TDT add descriptors and a CRC*/ if ((table_id==GF_M2TS_TABLE_ID_TDT) && (data_size != 5)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Corrupted TDT size\n", table_name)); } GF_SAFEALLOC(time_table, GF_M2TS_TDT_TOT); if (!time_table) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Fail to alloc DVB time table\n")); return; } /*UTC_time - 
see annex C of DVB-SI ETSI EN 300468*/ /* decodes an Modified Julian Date (MJD) into a Co-ordinated Universal Time (UTC) See annex C of DVB-SI ETSI EN 300468 */ date = data[0]*256 + data[1]; yp = (u32)((date - 15078.2)/365.25); mp = (u32)((date - 14956.1 - (u32)(yp * 365.25))/30.6001); time_table->day = (u32)(date - 14956 - (u32)(yp * 365.25) - (u32)(mp * 30.6001)); if (mp == 14 || mp == 15) k = 1; else k = 0; time_table->year = yp + k + 1900; time_table->month = mp - 1 - k*12; time_table->hour = 10*((data[2]&0xf0)>>4) + (data[2]&0x0f); time_table->minute = 10*((data[3]&0xf0)>>4) + (data[3]&0x0f); time_table->second = 10*((data[4]&0xf0)>>4) + (data[4]&0x0f); assert(time_table->hour<24 && time_table->minute<60 && time_table->second<60); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream UTC time is %u/%02u/%02u %02u:%02u:%02u\n", time_table->year, time_table->month, time_table->day, time_table->hour, time_table->minute, time_table->second)); switch (table_id) { case GF_M2TS_TABLE_ID_TDT: if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TDT, time_table); break; case GF_M2TS_TABLE_ID_TOT: #if 0 { u32 pos, loop_len; loop_len = ((data[5]&0x0f) << 8) | (data[6] & 0xff); data += 7; pos = 0; while (pos < loop_len) { u8 tag = data[pos]; pos += 2; if (tag == GF_M2TS_DVB_LOCAL_TIME_OFFSET_DESCRIPTOR) { char tmp_time[10]; u16 offset_hours, offset_minutes; now->country_code[0] = data[pos]; now->country_code[1] = data[pos+1]; now->country_code[2] = data[pos+2]; now->country_region_id = data[pos+3]>>2; sprintf(tmp_time, "%02x", data[pos+4]); offset_hours = atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+5]); offset_minutes = atoi(tmp_time); now->local_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->local_time_offset_seconds *= -1; dvb_decode_mjd_to_unix_time(data+pos+6, &now->unix_next_toc); sprintf(tmp_time, "%02x", data[pos+11]); offset_hours = 
atoi(tmp_time); sprintf(tmp_time, "%02x", data[pos+12]); offset_minutes = atoi(tmp_time); now->next_time_offset_seconds = (offset_hours * 60 + offset_minutes) * 60; if (data[pos+3] & 1) now->next_time_offset_seconds *= -1; pos+= 13; } } /*TODO: check lengths are ok*/ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); } #endif /*check CRC32*/ if (ts->tdt_tot->length<4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (less than 4 bytes but CRC32 should be present\n", table_name)); goto error_exit; } if (!gf_m2ts_crc32_check(ts->tdt_tot->section, ts->tdt_tot->length-4)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] corrupted %s table (CRC32 failed)\n", table_name)); goto error_exit; } if (ts->TDT_time) gf_free(ts->TDT_time); ts->TDT_time = time_table; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TOT, time_table); break; default: assert(0); goto error_exit; } return; /*success*/ error_exit: gf_free(time_table); return; } static GF_M2TS_MetadataPointerDescriptor *gf_m2ts_read_metadata_pointer_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataPointerDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataPointerDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->locator_record_flag = (gf_bs_read_int(bs, 1) ? 
GF_TRUE : GF_FALSE); d->carriage_flag = (enum metadata_carriage)gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 5); /*reserved */ size += 2; if (d->locator_record_flag) { d->locator_length = gf_bs_read_u8(bs); d->locator_data = (char *)gf_malloc(d->locator_length); size += 1 + d->locator_length; gf_bs_read_data(bs, d->locator_data, d->locator_length); } if (d->carriage_flag != 3) { d->program_number = gf_bs_read_u16(bs); size += 2; } if (d->carriage_flag == 1) { d->ts_location = gf_bs_read_u16(bs); d->ts_id = gf_bs_read_u16(bs); size += 4; } if (length-size > 0) { d->data_size = length-size; d->data = (char *)gf_malloc(d->data_size); gf_bs_read_data(bs, d->data, d->data_size); } return d; } static void gf_m2ts_metadata_pointer_descriptor_del(GF_M2TS_MetadataPointerDescriptor *metapd) { if (metapd) { if (metapd->locator_data) gf_free(metapd->locator_data); if (metapd->data) gf_free(metapd->data); gf_free(metapd); } } static GF_M2TS_MetadataDescriptor *gf_m2ts_read_metadata_descriptor(GF_BitStream *bs, u32 length) { u32 size; GF_M2TS_MetadataDescriptor *d; GF_SAFEALLOC(d, GF_M2TS_MetadataDescriptor); if (!d) return NULL; d->application_format = gf_bs_read_u16(bs); size = 2; if (d->application_format == 0xFFFF) { d->application_format_identifier = gf_bs_read_u32(bs); size += 4; } d->format = gf_bs_read_u8(bs); size += 1; if (d->format == 0xFF) { d->format_identifier = gf_bs_read_u32(bs); size += 4; } d->service_id = gf_bs_read_u8(bs); d->decoder_config_flags = gf_bs_read_int(bs, 3); d->dsmcc_flag = (gf_bs_read_int(bs, 1) ? 
GF_TRUE : GF_FALSE); gf_bs_read_int(bs, 4); /* reserved */ size += 2; if (d->dsmcc_flag) { d->service_id_record_length = gf_bs_read_u8(bs); d->service_id_record = (char *)gf_malloc(d->service_id_record_length); size += 1 + d->service_id_record_length; gf_bs_read_data(bs, d->service_id_record, d->service_id_record_length); } if (d->decoder_config_flags == 1) { d->decoder_config_length = gf_bs_read_u8(bs); d->decoder_config = (char *)gf_malloc(d->decoder_config_length); size += 1 + d->decoder_config_length; gf_bs_read_data(bs, d->decoder_config, d->decoder_config_length); } if (d->decoder_config_flags == 3) { d->decoder_config_id_length = gf_bs_read_u8(bs); d->decoder_config_id = (char *)gf_malloc(d->decoder_config_id_length); size += 1 + d->decoder_config_id_length; gf_bs_read_data(bs, d->decoder_config_id, d->decoder_config_id_length); } if (d->decoder_config_flags == 4) { d->decoder_config_service_id = gf_bs_read_u8(bs); size++; } return d; } static void gf_m2ts_process_pmt(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *pmt, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 info_length, pos, desc_len, evt_type, nb_es,i; u32 nb_sections; u32 data_size; u32 nb_hevc, nb_hevc_temp, nb_shvc, nb_shvc_temp, nb_mhvc, nb_mhvc_temp; unsigned char *data; GF_M2TS_Section *section; GF_Err e = GF_OK; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; nb_es = 0; /*skip if already received but no update detected (eg same data) */ if ((status&GF_M2TS_TABLE_REPEAT) && !(status&GF_M2TS_TABLE_UPDATE)) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); return; } if (pmt->sec->demux_restarted) { pmt->sec->demux_restarted = 0; return; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PMT Found or updated\n")); nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PMT on multiple sections not supported\n")); } section = 
(GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; pmt->program->pcr_pid = ((data[0] & 0x1f) << 8) | data[1]; info_length = ((data[2]&0xf)<<8) | data[3]; if (info_length + 4 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT first loop, %d bytes avail but first loop size %d\n", data_size, info_length)); return; } else if (info_length != 0) { /* ...Read Descriptors ... */ u8 tag, len; u32 first_loop_len = 0; tag = data[4]; len = data[5]; while (info_length > first_loop_len) { if (tag == GF_M2TS_MPEG4_IOD_DESCRIPTOR) { if ((len>2) && (len - 2 <= info_length)) { u32 size; GF_BitStream *iod_bs; iod_bs = gf_bs_new((char *)data+8, len-2, GF_BITSTREAM_READ); if (pmt->program->pmt_iod) gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); e = gf_odf_parse_descriptor(iod_bs , (GF_Descriptor **) &pmt->program->pmt_iod, &size); gf_bs_del(iod_bs ); if (e==GF_OK) { /*remember program number for service/program selection*/ if (pmt->program->pmt_iod) pmt->program->pmt_iod->ServiceID = pmt->program->number; /*if empty IOD (freebox case), discard it and use dynamic declaration of object*/ if (!gf_list_count(pmt->program->pmt_iod->ESDescriptors)) { gf_odf_desc_del((GF_Descriptor *)pmt->program->pmt_iod); pmt->program->pmt_iod = NULL; } } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken IOD! 
len %d less than 2 bytes to declare IOD\n", len)); } } else if (tag == GF_M2TS_METADATA_POINTER_DESCRIPTOR) { GF_BitStream *metadatapd_bs; GF_M2TS_MetadataPointerDescriptor *metapd; metadatapd_bs = gf_bs_new((char *)data+6, len, GF_BITSTREAM_READ); metapd = gf_m2ts_read_metadata_pointer_descriptor(metadatapd_bs, len); gf_bs_del(metadatapd_bs); if (metapd->application_format_identifier == GF_M2TS_META_ID3 && metapd->format_identifier == GF_M2TS_META_ID3 && metapd->carriage_flag == METADATA_CARRIAGE_SAME_TS) { /*HLS ID3 Metadata */ pmt->program->metadata_pointer_descriptor = metapd; } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_pointer_descriptor_del(metapd); } } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Skipping descriptor (0x%x) and others not supported\n", tag)); } first_loop_len += 2 + len; } } if (data_size <= 4 + info_length) return; data += 4 + info_length; data_size -= 4 + info_length; pos = 0; /* count de number of program related PMT received */ for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_Program *prog = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(prog->pmt_pid == pmt->pid) { break; } } nb_hevc = nb_hevc_temp = nb_shvc = nb_shvc_temp = nb_mhvc = nb_mhvc_temp = 0; while (pos<data_size) { GF_M2TS_PES *pes = NULL; GF_M2TS_SECTION_ES *ses = NULL; GF_M2TS_ES *es = NULL; Bool inherit_pcr = 0; u32 pid, stream_type, reg_desc_format; if (pos + 5 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT! 
size %d but position %d and need at least 5 bytes to declare es\n", data_size, pos)); break; } stream_type = data[0]; pid = ((data[1] & 0x1f) << 8) | data[2]; desc_len = ((data[3] & 0xf) << 8) | data[4]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("stream_type :%d \n",stream_type)); switch (stream_type) { /* PES */ case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_DCII: case GF_M2TS_VIDEO_MPEG4: case GF_M2TS_SYSTEMS_MPEG4_PES: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_MVCD: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: inherit_pcr = 1; case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_DTS: case GF_M2TS_MHAS_MAIN: case GF_M2TS_MHAS_AUX: case GF_M2TS_SUBTITLE_DVB: case GF_M2TS_METADATA_PES: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; if (inherit_pcr) pes->flags |= GF_M2TS_INHERIT_PCR; es = (GF_M2TS_ES *)pes; break; case GF_M2TS_PRIVATE_DATA: GF_SAFEALLOC(pes, GF_M2TS_PES); if (!pes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } pes->cc = -1; pes->flags = GF_M2TS_ES_IS_PES; es = (GF_M2TS_ES *)pes; break; /* Sections */ case GF_M2TS_SYSTEMS_MPEG4_SECTIONS: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; /* carriage of ISO_IEC_14496 data in sections */ if (stream_type == GF_M2TS_SYSTEMS_MPEG4_SECTIONS) { /*MPEG-4 sections need to be fully checked: if one section is lost, this means we lost one SL 
packet in the AU so we must wait for the complete section again*/ ses->sec = gf_m2ts_section_filter_new(gf_m2ts_process_mpeg4section, 0); /*create OD container*/ if (!pmt->program->additional_ods) { pmt->program->additional_ods = gf_list_new(); ts->has_4on2 = 1; } } break; case GF_M2TS_13818_6_ANNEX_A: case GF_M2TS_13818_6_ANNEX_B: case GF_M2TS_13818_6_ANNEX_C: case GF_M2TS_13818_6_ANNEX_D: case GF_M2TS_PRIVATE_SECTION: case GF_M2TS_QUALITY_SEC: case GF_M2TS_MORE_SEC: GF_SAFEALLOC(ses, GF_M2TS_SECTION_ES); if (!ses) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG2TS] Failed to allocate ES for pid %d\n", pid)); return; } es = (GF_M2TS_ES *)ses; es->flags |= GF_M2TS_ES_IS_SECTION; es->pid = pid; es->service_id = pmt->program->number; if (stream_type == GF_M2TS_PRIVATE_SECTION) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("AIT sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_QUALITY_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Quality metadata sections on pid %d\n", pid)); } else if (stream_type == GF_M2TS_MORE_SEC) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("MORE sections on pid %d\n", pid)); } else { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type DSM CC user private sections on pid %d \n", pid)); } /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ses->sec = gf_m2ts_section_filter_new(NULL, 1); //ses->sec->service_id = pmt->program->number; break; case GF_M2TS_MPE_SECTIONS: if (! 
ts->prefix_present) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("stream type MPE found : pid = %d \n", pid)); #ifdef GPAC_ENABLE_MPE es = gf_dvb_mpe_section_new(); if (es->flags & GF_M2TS_ES_IS_SECTION) { /* NULL means: trigger the call to on_event with DVB_GENERAL type and the raw section as payload */ ((GF_M2TS_SECTION_ES*)es)->sec = gf_m2ts_section_filter_new(NULL, 1); } #endif break; } default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); //GF_LOG(/*GF_LOG_WARNING*/GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); break; } if (es) { es->stream_type = (stream_type==GF_M2TS_PRIVATE_DATA) ? 0 : stream_type; es->program = pmt->program; es->pid = pid; es->component_tag = -1; } pos += 5; data += 5; while (desc_len) { if (pos + 2 > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! size %d but position %d and need at least 2 bytes to parse descritpor\n", data_size, pos)); break; } u8 tag = data[0]; u32 len = data[1]; if (pos + 2 + len > data_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Broken PMT descriptor! 
size %d, desc size %d but position %d\n", data_size, len, pos)); break; } if (es) { switch (tag) { case GF_M2TS_ISO_639_LANGUAGE_DESCRIPTOR: if (pes && (len>=3) ) pes->lang = GF_4CC(' ', data[2], data[3], data[4]); break; case GF_M2TS_MPEG4_SL_DESCRIPTOR: if (len>=2) { es->mpeg4_es_id = ( (u32) data[2] & 0x1f) << 8 | data[3]; es->flags |= GF_M2TS_ES_IS_SL; } break; case GF_M2TS_REGISTRATION_DESCRIPTOR: if (len>=4) { reg_desc_format = GF_4CC(data[2], data[3], data[4], data[5]); /*cf http://www.smpte-ra.org/mpegreg/mpegreg.html*/ switch (reg_desc_format) { case GF_M2TS_RA_STREAM_AC3: es->stream_type = GF_M2TS_AUDIO_AC3; break; case GF_M2TS_RA_STREAM_VC1: es->stream_type = GF_M2TS_VIDEO_VC1; break; case GF_M2TS_RA_STREAM_GPAC: if (len==8) { es->stream_type = GF_4CC(data[6], data[7], data[8], data[9]); es->flags |= GF_M2TS_GPAC_CODEC_ID; break; } default: GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("Unknown registration descriptor %s\n", gf_4cc_to_str(reg_desc_format) )); break; } } break; case GF_M2TS_DVB_EAC3_DESCRIPTOR: es->stream_type = GF_M2TS_AUDIO_EC3; break; case GF_M2TS_DVB_DATA_BROADCAST_ID_DESCRIPTOR: if (len>=2) { u32 id = data[2]<<8 | data[3]; if ((id == 0xB) && ses && !ses->sec) { ses->sec = gf_m2ts_section_filter_new(NULL, 1); } } break; case GF_M2TS_DVB_SUBTITLING_DESCRIPTOR: if (pes && (len>=8)) { pes->sub.language[0] = data[2]; pes->sub.language[1] = data[3]; pes->sub.language[2] = data[4]; pes->sub.type = data[5]; pes->sub.composition_page_id = (data[6]<<8) | data[7]; pes->sub.ancillary_page_id = (data[8]<<8) | data[9]; } es->stream_type = GF_M2TS_DVB_SUBTITLE; break; case GF_M2TS_DVB_STREAM_IDENTIFIER_DESCRIPTOR: if (len>=1) { es->component_tag = data[2]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Component Tag: %d on Program %d\n", es->component_tag, es->program->number)); } break; case GF_M2TS_DVB_TELETEXT_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_TELETEXT; break; case GF_M2TS_DVB_VBI_DATA_DESCRIPTOR: es->stream_type = GF_M2TS_DVB_VBI; break; case 
GF_M2TS_HIERARCHY_DESCRIPTOR: if (pes && (len>=4)) { u8 hierarchy_embedded_layer_index; GF_BitStream *hbs = gf_bs_new((const char *)data, data_size, GF_BITSTREAM_READ); /*u32 skip = */gf_bs_read_int(hbs, 16); /*u8 res1 = */gf_bs_read_int(hbs, 1); /*u8 temp_scal = */gf_bs_read_int(hbs, 1); /*u8 spatial_scal = */gf_bs_read_int(hbs, 1); /*u8 quality_scal = */gf_bs_read_int(hbs, 1); /*u8 hierarchy_type = */gf_bs_read_int(hbs, 4); /*u8 res2 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_layer_index = */gf_bs_read_int(hbs, 6); /*u8 tref_not_present = */gf_bs_read_int(hbs, 1); /*u8 res3 = */gf_bs_read_int(hbs, 1); hierarchy_embedded_layer_index = gf_bs_read_int(hbs, 6); /*u8 res4 = */gf_bs_read_int(hbs, 2); /*u8 hierarchy_channel = */gf_bs_read_int(hbs, 6); gf_bs_del(hbs); pes->depends_on_pid = 1+hierarchy_embedded_layer_index; } break; case GF_M2TS_METADATA_DESCRIPTOR: { GF_BitStream *metadatad_bs; GF_M2TS_MetadataDescriptor *metad; metadatad_bs = gf_bs_new((char *)data+2, len, GF_BITSTREAM_READ); metad = gf_m2ts_read_metadata_descriptor(metadatad_bs, len); gf_bs_del(metadatad_bs); if (metad->application_format_identifier == GF_M2TS_META_ID3 && metad->format_identifier == GF_M2TS_META_ID3) { /*HLS ID3 Metadata */ if (pes) { pes->metadata_descriptor = metad; pes->stream_type = GF_M2TS_METADATA_ID3_HLS; } } else { /* don't know what to do with it for now, delete */ gf_m2ts_metadata_descriptor_del(metad); } } break; default: GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] skipping descriptor (0x%x) not supported\n", tag)); break; } } data += len+2; pos += len+2; if (desc_len < len+2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Invalid PMT es descriptor size for PID %d\n", pid ) ); break; } desc_len-=len+2; } if (es && !es->stream_type) { gf_free(es); es = NULL; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Private Stream type (0x%x) for PID %d not supported\n", stream_type, pid ) ); } if (!es) continue; if (ts->ess[pid]) { //this is component reuse across 
programs, overwrite the previously declared stream ... if (status & GF_M2TS_TABLE_FOUND) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d reused across programs %d and %d, not completely supported\n", pid, ts->ess[pid]->program->number, es->program->number ) ); //add stream to program but don't reassign the pid table until the stream is playing (>GF_M2TS_PES_FRAMING_SKIP) gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; //skip assignment below es = NULL; } /*watchout for pmt update - FIXME this likely won't work in most cases*/ else { GF_M2TS_ES *o_es = ts->ess[es->pid]; if ((o_es->stream_type == es->stream_type) && ((o_es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK) == (es->flags & GF_M2TS_ES_STATIC_FLAGS_MASK)) && (o_es->mpeg4_es_id == es->mpeg4_es_id) && ((o_es->flags & GF_M2TS_ES_IS_SECTION) || ((GF_M2TS_PES *)o_es)->lang == ((GF_M2TS_PES *)es)->lang) ) { gf_free(es); es = NULL; } else { gf_m2ts_es_del(o_es, ts); ts->ess[es->pid] = NULL; } } } if (es) { ts->ess[es->pid] = es; gf_list_add(pmt->program->streams, es); if (!(es->flags & GF_M2TS_ES_IS_SECTION) ) gf_m2ts_set_pes_framing(pes, GF_M2TS_PES_FRAMING_SKIP); nb_es++; if (es->stream_type == GF_M2TS_VIDEO_HEVC) nb_hevc++; else if (es->stream_type == GF_M2TS_VIDEO_HEVC_TEMPORAL) nb_hevc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC) nb_shvc++; else if (es->stream_type == GF_M2TS_VIDEO_SHVC_TEMPORAL) nb_shvc_temp++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC) nb_mhvc++; else if (es->stream_type == GF_M2TS_VIDEO_MHVC_TEMPORAL) nb_mhvc_temp++; } } //Table 2-139, implied hierarchy indexes if (nb_hevc_temp + nb_shvc + nb_shvc_temp + nb_mhvc+ nb_mhvc_temp) { for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (es->depends_on_pid) continue; switch (es->stream_type) { case 
GF_M2TS_VIDEO_HEVC_TEMPORAL: es->depends_on_pid = 1; break; case GF_M2TS_VIDEO_SHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_SHVC_TEMPORAL: es->depends_on_pid = 3; break; case GF_M2TS_VIDEO_MHVC: if (!nb_hevc_temp) es->depends_on_pid = 1; else es->depends_on_pid = 2; break; case GF_M2TS_VIDEO_MHVC_TEMPORAL: if (!nb_hevc_temp) es->depends_on_pid = 2; else es->depends_on_pid = 3; break; } } } if (nb_es) { u32 i; //translate hierarchy descriptors indexes into PIDs - check whether the PMT-index rules are the same for HEVC for (i=0; i<gf_list_count(pmt->program->streams); i++) { GF_M2TS_PES *an_es = NULL; GF_M2TS_PES *es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, i); if ( !(es->flags & GF_M2TS_ES_IS_PES)) continue; if (!es->depends_on_pid) continue; //fixeme we are not always assured that hierarchy_layer_index matches the stream index... //+1 is because our first stream is the PMT an_es = (GF_M2TS_PES *)gf_list_get(pmt->program->streams, es->depends_on_pid); if (an_es) { es->depends_on_pid = an_es->pid; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS] Wrong dependency index in hierarchy descriptor, assuming non-scalable stream\n")); es->depends_on_pid = 0; } } evt_type = (status&GF_M2TS_TABLE_FOUND) ? 
GF_M2TS_EVT_PMT_FOUND : GF_M2TS_EVT_PMT_UPDATE; if (ts->on_event) ts->on_event(ts, evt_type, pmt->program); } else { /* if we found no new ES it's simply a repeat of the PMT */ if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PMT_REPEAT, pmt->program); } } static void gf_m2ts_process_pat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs, evt_type; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_PAT_REPEAT, NULL); return; } nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("PAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; if (!(status&GF_M2TS_TABLE_UPDATE) && gf_list_count(ts->programs)) { if (ts->pat->demux_restarted) { ts->pat->demux_restarted = 0; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Multiple different PAT on single TS found, ignoring new PAT declaration (table id %d - extended table id %d)\n", table_id, ex_table_id)); } return; } nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); if (!prog) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Fail to allocate program for pid %d\n", pid)); return; } prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; prog->ts = ts; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); if (!pmt) { GF_LOG(GF_LOG_ERROR, 
GF_LOG_CONTAINER, ("Fail to allocate pmt filter for pid %d\n", pid)); return; } pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } evt_type = (status&GF_M2TS_TABLE_UPDATE) ? GF_M2TS_EVT_PAT_UPDATE : GF_M2TS_EVT_PAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } static void gf_m2ts_process_cat(GF_M2TS_Demuxer *ts, GF_M2TS_SECTION_ES *ses, GF_List *sections, u8 table_id, u16 ex_table_id, u8 version_number, u8 last_section_number, u32 status) { u32 evt_type; /* GF_M2TS_Program *prog; GF_M2TS_SECTION_ES *pmt; u32 i, nb_progs; u32 nb_sections; u32 data_size; unsigned char *data; GF_M2TS_Section *section; */ /*wait for the last section */ if (!(status&GF_M2TS_TABLE_END)) return; /*skip if already received*/ if (status&GF_M2TS_TABLE_REPEAT) { if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_CAT_REPEAT, NULL); return; } /* nb_sections = gf_list_count(sections); if (nb_sections > 1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("CAT on multiple sections not supported\n")); } section = (GF_M2TS_Section *)gf_list_get(sections, 0); data = section->data; data_size = section->data_size; nb_progs = data_size / 4; for (i=0; i<nb_progs; i++) { u16 number, pid; number = (data[0]<<8) | data[1]; pid = (data[2]&0x1f)<<8 | data[3]; data += 4; if (number==0) { if (!ts->nit) { ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); } } else { GF_SAFEALLOC(prog, GF_M2TS_Program); prog->streams = gf_list_new(); prog->pmt_pid = pid; prog->number = number; gf_list_add(ts->programs, prog); GF_SAFEALLOC(pmt, GF_M2TS_SECTION_ES); pmt->flags = GF_M2TS_ES_IS_SECTION; gf_list_add(prog->streams, pmt); pmt->pid = prog->pmt_pid; pmt->program = prog; ts->ess[pmt->pid] = (GF_M2TS_ES *)pmt; pmt->sec = gf_m2ts_section_filter_new(gf_m2ts_process_pmt, 0); } } */ evt_type = (status&GF_M2TS_TABLE_UPDATE) ? 
GF_M2TS_EVT_CAT_UPDATE : GF_M2TS_EVT_CAT_FOUND; if (ts->on_event) ts->on_event(ts, evt_type, NULL); } u64 gf_m2ts_get_pts(unsigned char *data) { u64 pts; u32 val; pts = (u64)((data[0] >> 1) & 0x07) << 30; val = (data[1] << 8) | data[2]; pts |= (u64)(val >> 1) << 15; val = (data[3] << 8) | data[4]; pts |= (u64)(val >> 1); return pts; } void gf_m2ts_pes_header(GF_M2TS_PES *pes, unsigned char *data, u32 data_size, GF_M2TS_PESHeader *pesh) { u32 has_pts, has_dts; u32 len_check; memset(pesh, 0, sizeof(GF_M2TS_PESHeader)); len_check = 0; pesh->id = data[0]; pesh->pck_len = (data[1]<<8) | data[2]; /* 2bits scrambling_control = gf_bs_read_int(bs,2); priority = gf_bs_read_int(bs,1); */ pesh->data_alignment = (data[3] & 0x4) ? 1 : 0; /* copyright = gf_bs_read_int(bs,1); original = gf_bs_read_int(bs,1); */ has_pts = (data[4]&0x80); has_dts = has_pts ? (data[4]&0x40) : 0; /* ESCR_flag = gf_bs_read_int(bs,1); ES_rate_flag = gf_bs_read_int(bs,1); DSM_flag = gf_bs_read_int(bs,1); additional_copy_flag = gf_bs_read_int(bs,1); prev_crc_flag = gf_bs_read_int(bs,1); extension_flag = gf_bs_read_int(bs,1); */ pesh->hdr_data_len = data[5]; data += 6; if (has_pts) { pesh->PTS = gf_m2ts_get_pts(data); data+=5; len_check += 5; } if (has_dts) { pesh->DTS = gf_m2ts_get_pts(data); //data+=5; len_check += 5; } else { pesh->DTS = pesh->PTS; } if (len_check < pesh->hdr_data_len) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Skipping %d bytes in pes header\n", pes->pid, pesh->hdr_data_len - len_check)); } else if (len_check > pesh->hdr_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong pes_header_data_length field %d bytes - read %d\n", pes->pid, pesh->hdr_data_len, len_check)); } if ((pesh->PTS<90000) && ((s32)pesh->DTS<0)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Wrong DTS %d negative for PTS %d - forcing to 0\n", pes->pid, pesh->DTS, pesh->PTS)); pesh->DTS=0; } } static void gf_m2ts_store_temi(GF_M2TS_Demuxer *ts, GF_M2TS_PES 
*pes) { GF_BitStream *bs = gf_bs_new(pes->temi_tc_desc, pes->temi_tc_desc_len, GF_BITSTREAM_READ); u32 has_timestamp = gf_bs_read_int(bs, 2); Bool has_ntp = (Bool) gf_bs_read_int(bs, 1); /*u32 has_ptp = */gf_bs_read_int(bs, 1); /*u32 has_timecode = */gf_bs_read_int(bs, 2); memset(&pes->temi_tc, 0, sizeof(GF_M2TS_TemiTimecodeDescriptor)); pes->temi_tc.force_reload = gf_bs_read_int(bs, 1); pes->temi_tc.is_paused = gf_bs_read_int(bs, 1); pes->temi_tc.is_discontinuity = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); pes->temi_tc.timeline_id = gf_bs_read_int(bs, 8); if (has_timestamp) { pes->temi_tc.media_timescale = gf_bs_read_u32(bs); if (has_timestamp==2) pes->temi_tc.media_timestamp = gf_bs_read_u64(bs); else pes->temi_tc.media_timestamp = gf_bs_read_u32(bs); } if (has_ntp) { pes->temi_tc.ntp = gf_bs_read_u64(bs); } gf_bs_del(bs); pes->temi_tc_desc_len = 0; pes->temi_pending = 1; } void gf_m2ts_flush_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes) { GF_M2TS_PESHeader pesh; if (!ts) return; /*we need at least a full, valid start code and PES header !!*/ if ((pes->pck_data_len >= 4) && !pes->pck_data[0] && !pes->pck_data[1] && (pes->pck_data[2] == 0x1)) { u32 len; Bool has_pes_header = GF_TRUE; u32 stream_id = pes->pck_data[3]; Bool same_pts = GF_FALSE; switch (stream_id) { case GF_M2_STREAMID_PROGRAM_STREAM_MAP: case GF_M2_STREAMID_PADDING: case GF_M2_STREAMID_PRIVATE_2: case GF_M2_STREAMID_ECM: case GF_M2_STREAMID_EMM: case GF_M2_STREAMID_PROGRAM_STREAM_DIRECTORY: case GF_M2_STREAMID_DSMCC: case GF_M2_STREAMID_H222_TYPE_E: has_pes_header = GF_FALSE; break; } if (has_pes_header) { /*OK read header*/ gf_m2ts_pes_header(pes, pes->pck_data + 3, pes->pck_data_len - 3, &pesh); /*send PES timing*/ if (ts->notify_pes_timing) { GF_M2TS_PES_PCK pck; memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); pck.PTS = pesh.PTS; pck.DTS = pesh.DTS; pck.stream = pes; if (pes->rap) pck.flags |= GF_M2TS_PES_PCK_RAP; pes->pes_end_packet_number = ts->pck_number; if (ts->on_event) ts->on_event(ts, 
GF_M2TS_EVT_PES_TIMING, &pck); } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d Got PES header DTS %d PTS %d\n", pes->pid, pesh.DTS, pesh.PTS)); if (pesh.PTS) { if (pesh.PTS == pes->PTS) { same_pts = GF_TRUE; GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same PTS "LLU" for two consecutive PES packets \n", pes->pid, pes->PTS)); } #ifndef GPAC_DISABLE_LOG /*FIXME - this test should only be done for non bi-directionnally coded media else if (pesh.PTS < pes->PTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - PTS "LLU" less than previous packet PTS "LLU"\n", pes->pid, pesh.PTS, pes->PTS) ); } */ #endif pes->PTS = pesh.PTS; #ifndef GPAC_DISABLE_LOG { if (pes->DTS && (pesh.DTS == pes->DTS)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - same DTS "LLU" for two consecutive PES packets \n", pes->pid, pes->DTS)); } if (pesh.DTS < pes->DTS) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d - DTS "LLU" less than previous DTS "LLU"\n", pes->pid, pesh.DTS, pes->DTS)); } } #endif pes->DTS = pesh.DTS; } /*no PTSs were coded, same time*/ else if (!pesh.hdr_data_len) { same_pts = GF_TRUE; } /*3-byte start-code + 6 bytes header + hdr extensions*/ len = 9 + pesh.hdr_data_len; } else { /*3-byte start-code + 1 byte streamid*/ len = 4; memset(&pesh, 0, sizeof(pesh)); } if ((u8) pes->pck_data[3]==0xfa) { GF_M2TS_SL_PCK sl_pck; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] SL Packet in PES for %d - ES ID %d\n", pes->pid, pes->mpeg4_es_id)); if (pes->pck_data_len > len) { sl_pck.data = (char *)pes->pck_data + len; sl_pck.data_len = pes->pck_data_len - len; sl_pck.stream = (GF_M2TS_ES *)pes; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_SL_PCK, &sl_pck); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] Bad SL Packet size: (%d indicated < %d header)\n", pes->pid, pes->pck_data_len, len)); } } else if (pes->reframe) { u32 remain = 0; u32 offset = len; if (pesh.pck_len && 
(pesh.pck_len-3-pesh.hdr_data_len != pes->pck_data_len-len)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES payload size %d but received %d bytes\n", pes->pid, (u32) ( pesh.pck_len-3-pesh.hdr_data_len), pes->pck_data_len-len)); } //copy over the remaining of previous PES payload before start of this PES payload if (pes->prev_data_len) { if (pes->prev_data_len < len) { offset = len - pes->prev_data_len; memcpy(pes->pck_data + offset, pes->prev_data, pes->prev_data_len); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PES reassembly buffer overflow (%d bytes not processed from previous PES) - discarding prev data\n", pes->pid, pes->prev_data_len )); } } if (!pes->temi_pending && pes->temi_tc_desc_len) { gf_m2ts_store_temi(ts, pes); } if (pes->temi_pending) { pes->temi_pending = 0; pes->temi_tc.pes_pts = pes->PTS; if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_TIMECODE, &pes->temi_tc); } if (! ts->seek_mode) remain = pes->reframe(ts, pes, same_pts, pes->pck_data+offset, pes->pck_data_len-offset, &pesh); //CLEANUP alloc stuff if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; if (remain) { pes->prev_data = gf_malloc(sizeof(char)*remain); assert(pes->pck_data_len >= remain); memcpy(pes->prev_data, pes->pck_data + pes->pck_data_len - remain, remain); pes->prev_data_len = remain; } } } else if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Bad PES Header, discarding packet (maybe stream is encrypted ?)\n", pes->pid)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->rap = 0; } static void gf_m2ts_process_pes(GF_M2TS_Demuxer *ts, GF_M2TS_PES *pes, GF_M2TS_Header *hdr, unsigned char *data, u32 data_size, GF_M2TS_AdaptationField *paf) { u8 expect_cc; Bool disc=0; Bool flush_pes = 0; /*duplicated packet, NOT A DISCONTINUITY, we should discard the packet - however we may encounter this configuration in DASH at segment boundaries. 
If payload start is set, ignore duplication*/ if (hdr->continuity_counter==pes->cc) { if (!hdr->payload_start || (hdr->adaptation_field!=3) ) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Duplicated Packet found (CC %d) - skipping\n", pes->pid, pes->cc)); return; } } else { expect_cc = (pes->cc<0) ? hdr->continuity_counter : (pes->cc + 1) & 0xf; if (expect_cc != hdr->continuity_counter) disc = 1; } pes->cc = hdr->continuity_counter; if (disc) { if (pes->flags & GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY) { pes->flags &= ~GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; disc = 0; } if (disc) { if (hdr->payload_start) { if (pes->pck_data_len) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - may have lost end of previous PES\n", pes->pid, expect_cc, hdr->continuity_counter)); } } else { if (pes->pck_data_len) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PES %d: Packet discontinuity (%d expected - got %d) - trashing PES packet\n", pes->pid, expect_cc, hdr->continuity_counter)); } pes->pck_data_len = 0; pes->pes_len = 0; pes->cc = -1; return; } } } if (!pes->reframe) return; if (hdr->payload_start) { flush_pes = 1; pes->pes_start_packet_number = ts->pck_number; pes->before_last_pcr_value = pes->program->before_last_pcr_value; pes->before_last_pcr_value_pck_number = pes->program->before_last_pcr_value_pck_number; pes->last_pcr_value = pes->program->last_pcr_value; pes->last_pcr_value_pck_number = pes->program->last_pcr_value_pck_number; } else if (pes->pes_len && (pes->pck_data_len + data_size == pes->pes_len + 6)) { /* 6 = startcode+stream_id+length*/ /*reassemble pes*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data+pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; /*force discard*/ data_size = 0; flush_pes = 1; } /*PES first 
fragment: flush previous packet*/ if (flush_pes && pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); if (!data_size) return; } /*we need to wait for first packet of PES*/ if (!pes->pck_data_len && !hdr->payload_start) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Waiting for PES header, trashing data\n", hdr->pid)); return; } /*reassemble*/ if (pes->pck_data_len + data_size > pes->pck_alloc_len ) { pes->pck_alloc_len = pes->pck_data_len + data_size; pes->pck_data = (u8*)gf_realloc(pes->pck_data, pes->pck_alloc_len); } memcpy(pes->pck_data + pes->pck_data_len, data, data_size); pes->pck_data_len += data_size; if (paf && paf->random_access_indicator) pes->rap = 1; if (hdr->payload_start && !pes->pes_len && (pes->pck_data_len>=6)) { pes->pes_len = (pes->pck_data[4]<<8) | pes->pck_data[5]; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Got PES packet len %d\n", pes->pid, pes->pes_len)); if (pes->pes_len + 6 == pes->pck_data_len) { gf_m2ts_flush_pes(ts, pes); } } } static void gf_m2ts_get_adaptation_field(GF_M2TS_Demuxer *ts, GF_M2TS_AdaptationField *paf, unsigned char *data, u32 size, u32 pid) { unsigned char *af_extension; paf->discontinuity_indicator = (data[0] & 0x80) ? 1 : 0; paf->random_access_indicator = (data[0] & 0x40) ? 1 : 0; paf->priority_indicator = (data[0] & 0x20) ? 1 : 0; paf->PCR_flag = (data[0] & 0x10) ? 1 : 0; paf->OPCR_flag = (data[0] & 0x8) ? 1 : 0; paf->splicing_point_flag = (data[0] & 0x4) ? 1 : 0; paf->transport_private_data_flag = (data[0] & 0x2) ? 1 : 0; paf->adaptation_field_extension_flag = (data[0] & 0x1) ? 
1 : 0;

	/*af_extension walks the optional AF fields, starting right after the flags byte*/
	af_extension = data + 1;

	if (paf->PCR_flag == 1) {
		/*33-bit PCR base: 32 MSBs from bytes 1-4, LSB from the top bit of byte 5;
		9-bit PCR extension from the low bit of byte 5 and byte 6*/
		u32 base = ((u32)data[1] << 24) | ((u32)data[2] << 16) | ((u32)data[3] << 8) | (u32)data[4];
		u64 PCR = (u64) base;
		paf->PCR_base = (PCR << 1) | (data[5] >> 7);
		paf->PCR_ext = ((data[5] & 1) << 8) | data[6];
		af_extension += 6;
	}

	if (paf->adaptation_field_extension_flag) {
		u32 afext_bytes;
		Bool ltw_flag, pwr_flag, seamless_flag, af_desc_not_present;
		/*NOTE(review): the `size` parameter is never used to bound these skips -
		an OPCR/splice/private-data combination (priv_bytes is attacker controlled,
		up to 255) can push af_extension past data+size before afext_bytes is even
		read; the per-field checks below only validate the DECLARED extension
		length, not the actual buffer. TODO confirm and clamp against `size`.*/
		if (paf->OPCR_flag) {
			af_extension += 6;
		}
		if (paf->splicing_point_flag) {
			af_extension += 1;
		}
		if (paf->transport_private_data_flag) {
			u32 priv_bytes = af_extension[0];
			af_extension += 1 + priv_bytes;
		}

		/*adaptation_field_extension_length, then the 4 extension flag bits*/
		afext_bytes = af_extension[0];
		ltw_flag = af_extension[1] & 0x80 ? 1 : 0;
		pwr_flag = af_extension[1] & 0x40 ? 1 : 0;
		seamless_flag = af_extension[1] & 0x20 ? 1 : 0;
		af_desc_not_present = af_extension[1] & 0x10 ? 1 : 0;
		af_extension += 2;

		if (!afext_bytes) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
			return;
		}
		/*one byte consumed by the flags*/
		afext_bytes-=1;

		/*NOTE(review): each optional field below advances the pointer BEFORE
		validating afext_bytes; harmless today because every failure path
		returns, but fragile if a later change keeps parsing after the log*/
		if (ltw_flag) {
			af_extension += 2;
			if (afext_bytes<2) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
				return;
			}
			afext_bytes-=2;
		}
		if (pwr_flag) {
			af_extension += 3;
			if (afext_bytes<3) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
				return;
			}
			afext_bytes-=3;
		}
		if (seamless_flag) {
			af_extension += 3;
			if (afext_bytes<3) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Extension found\n", pid));
				return;
			}
			afext_bytes-=3;
		}
		if (!
af_desc_not_present) { while (afext_bytes) { GF_BitStream *bs; char *desc; u8 desc_tag = af_extension[0]; u8 desc_len = af_extension[1]; if (!desc_len || (u32) desc_len+2 > afext_bytes) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Bad Adaptation Descriptor found (tag %d) size is %d but only %d bytes available\n", pid, desc_tag, desc_len, afext_bytes)); break; } desc = (char *) af_extension+2; bs = gf_bs_new(desc, desc_len, GF_BITSTREAM_READ); switch (desc_tag) { case GF_M2TS_AFDESC_LOCATION_DESCRIPTOR: { Bool use_base_temi_url; char URL[255]; GF_M2TS_TemiLocationDescriptor temi_loc; memset(&temi_loc, 0, sizeof(GF_M2TS_TemiLocationDescriptor) ); temi_loc.reload_external = gf_bs_read_int(bs, 1); temi_loc.is_announce = gf_bs_read_int(bs, 1); temi_loc.is_splicing = gf_bs_read_int(bs, 1); use_base_temi_url = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 5); //reserved temi_loc.timeline_id = gf_bs_read_int(bs, 7); if (!use_base_temi_url) { char *_url = URL; u8 scheme = gf_bs_read_int(bs, 8); u8 url_len = gf_bs_read_int(bs, 8); switch (scheme) { case 1: strcpy(URL, "http://"); _url = URL+7; break; case 2: strcpy(URL, "https://"); _url = URL+8; break; } gf_bs_read_data(bs, _url, url_len); _url[url_len] = 0; } temi_loc.external_URL = URL; GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Location descriptor found - URL %s\n", pid, URL)); if (ts->on_event) ts->on_event(ts, GF_M2TS_EVT_TEMI_LOCATION, &temi_loc); } break; case GF_M2TS_AFDESC_TIMELINE_DESCRIPTOR: if (ts->ess[pid] && (ts->ess[pid]->flags & GF_M2TS_ES_IS_PES)) { GF_M2TS_PES *pes = (GF_M2TS_PES *) ts->ess[pid]; if (pes->temi_tc_desc_len) gf_m2ts_store_temi(ts, pes); if (pes->temi_tc_desc_alloc_size < desc_len) { pes->temi_tc_desc = gf_realloc(pes->temi_tc_desc, desc_len); pes->temi_tc_desc_alloc_size = desc_len; } memcpy(pes->temi_tc_desc, desc, desc_len); pes->temi_tc_desc_len = desc_len; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d AF Timeline descriptor found\n", pid)); 
} break; } gf_bs_del(bs); af_extension += 2+desc_len; afext_bytes -= 2+desc_len; } } } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d: Adaptation Field found: Discontinuity %d - RAP %d - PCR: "LLD"\n", pid, paf->discontinuity_indicator, paf->random_access_indicator, paf->PCR_flag ? paf->PCR_base * 300 + paf->PCR_ext : 0)); } static GF_Err gf_m2ts_process_packet(GF_M2TS_Demuxer *ts, unsigned char *data) { GF_M2TS_ES *es; GF_M2TS_Header hdr; GF_M2TS_AdaptationField af, *paf; u32 payload_size, af_size; u32 pos = 0; ts->pck_number++; /* read TS packet header*/ hdr.sync = data[0]; if (hdr.sync != 0x47) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d does not start with sync marker\n", ts->pck_number)); return GF_CORRUPTED_DATA; } hdr.error = (data[1] & 0x80) ? 1 : 0; hdr.payload_start = (data[1] & 0x40) ? 1 : 0; hdr.priority = (data[1] & 0x20) ? 1 : 0; hdr.pid = ( (data[1]&0x1f) << 8) | data[2]; hdr.scrambling_ctrl = (data[3] >> 6) & 0x3; hdr.adaptation_field = (data[3] >> 4) & 0x3; hdr.continuity_counter = data[3] & 0xf; if (hdr.error) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d has error (PID could be %d)\n", ts->pck_number, hdr.pid)); return GF_CORRUPTED_DATA; } //#if DEBUG_TS_PACKET GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d PID %d CC %d Encrypted %d\n", ts->pck_number, hdr.pid, hdr.continuity_counter, hdr.scrambling_ctrl)); //#endif if (hdr.scrambling_ctrl) { //TODO add decyphering GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d is scrambled - not supported\n", ts->pck_number, hdr.pid)); return GF_NOT_SUPPORTED; } paf = NULL; payload_size = 184; pos = 4; switch (hdr.adaptation_field) { /*adaptation+data*/ case 3: af_size = data[4]; if (af_size>183) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF field larger than 183 !\n", ts->pck_number)); //error return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); 
//this will stop you when processing invalid (yet existing) mpeg2ts streams in debug assert( af_size<=183); if (af_size>183) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d Detected wrong adaption field size %u when control value is 3\n", ts->pck_number, af_size)); if (af_size) gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); pos += 1+af_size; payload_size = 183 - af_size; break; /*adaptation only - still process in case of PCR*/ case 2: af_size = data[4]; if (af_size != 183) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] TS Packet %d AF size is %d when it must be 183 for AF type 2\n", ts->pck_number, af_size)); return GF_CORRUPTED_DATA; } paf = &af; memset(paf, 0, sizeof(GF_M2TS_AdaptationField)); gf_m2ts_get_adaptation_field(ts, paf, data+5, af_size, hdr.pid); payload_size = 0; /*no payload and no PCR, return*/ if (!paf->PCR_flag) return GF_OK; break; /*reserved*/ case 0: return GF_OK; default: break; } data += pos; /*PAT*/ if (hdr.pid == GF_M2TS_PID_PAT) { gf_m2ts_gather_section(ts, ts->pat, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_CAT) { gf_m2ts_gather_section(ts, ts->cat, NULL, &hdr, data, payload_size); return GF_OK; } es = ts->ess[hdr.pid]; if (paf && paf->PCR_flag) { if (!es) { u32 i, j; for(i=0; i<gf_list_count(ts->programs); i++) { GF_M2TS_PES *first_pes = NULL; GF_M2TS_Program *program = (GF_M2TS_Program *)gf_list_get(ts->programs,i); if(program->pcr_pid != hdr.pid) continue; for (j=0; j<gf_list_count(program->streams); j++) { GF_M2TS_PES *pes = (GF_M2TS_PES *) gf_list_get(program->streams, j); if (pes->flags & GF_M2TS_INHERIT_PCR) { ts->ess[hdr.pid] = (GF_M2TS_ES *) pes; pes->flags |= GF_M2TS_FAKE_PCR; break; } if (pes->flags & GF_M2TS_ES_IS_PES) { first_pes = pes; } } //non found, use the first media stream as a PCR destination - Q: is it legal to have PCR only streams not declared in PMT ? 
if (!es && first_pes) { es = (GF_M2TS_ES *) first_pes; first_pes->flags |= GF_M2TS_FAKE_PCR; } break; } if (!es) es = ts->ess[hdr.pid]; } if (es) { GF_M2TS_PES_PCK pck; s64 prev_diff_in_us; Bool discontinuity; s32 cc = -1; if (es->flags & GF_M2TS_FAKE_PCR) { cc = es->program->pcr_cc; es->program->pcr_cc = hdr.continuity_counter; } else if (es->flags & GF_M2TS_ES_IS_PES) cc = ((GF_M2TS_PES*)es)->cc; else if (((GF_M2TS_SECTION_ES*)es)->sec) cc = ((GF_M2TS_SECTION_ES*)es)->sec->cc; discontinuity = paf->discontinuity_indicator; if ((cc>=0) && es->program->before_last_pcr_value) { //no increment of CC if AF only packet if (hdr.adaptation_field == 2) { if (hdr.continuity_counter != cc) { discontinuity = GF_TRUE; } } else if (hdr.continuity_counter != ((cc + 1) & 0xF)) { discontinuity = GF_TRUE; } } memset(&pck, 0, sizeof(GF_M2TS_PES_PCK)); prev_diff_in_us = (s64) (es->program->last_pcr_value /27- es->program->before_last_pcr_value/27); es->program->before_last_pcr_value = es->program->last_pcr_value; es->program->before_last_pcr_value_pck_number = es->program->last_pcr_value_pck_number; es->program->last_pcr_value_pck_number = ts->pck_number; es->program->last_pcr_value = paf->PCR_base * 300 + paf->PCR_ext; if (!es->program->last_pcr_value) es->program->last_pcr_value = 1; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" ("LLU" at 90kHz) - PCR diff is %d us\n", hdr.pid, es->program->last_pcr_value, es->program->last_pcr_value/300, (s32) (es->program->last_pcr_value - es->program->before_last_pcr_value)/27 )); pck.PTS = es->program->last_pcr_value; pck.stream = (GF_M2TS_PES *)es; //try to ignore all discontinuities that are less than 200 ms (seen in some HLS setup ...) 
if (discontinuity) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; u64 diff = ABS(diff_in_us - prev_diff_in_us); if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, with discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } //ignore PCR discontinuity indicator if PCR found is larger than previously received PCR and diffence between PCR before and after discontinuity indicator is smaller than 50ms else if ((diff_in_us > 0) && (diff < 200000)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled but diff is small (diff %d us - PCR diff %d vs prev PCR diff %d) - ignore it\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); } else if (paf->discontinuity_indicator) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR discontinuity not signaled (diff %d us - PCR diff %d vs prev PCR diff %d)\n", hdr.pid, diff, diff_in_us, prev_diff_in_us)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } else if ( (es->program->last_pcr_value < es->program->before_last_pcr_value) ) { s64 diff_in_us = (s64) (es->program->last_pcr_value - es->program->before_last_pcr_value) / 27; //if less than 200 ms before PCR loop at the last PCR, this is a PCR loop if (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value < 5400000 /*2*2700000*/) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR loop found from "LLU" to "LLU" \n", hdr.pid, es->program->before_last_pcr_value, es->program->last_pcr_value)); } else if ((diff_in_us<0) && (diff_in_us >= -200000)) { GF_LOG(GF_LOG_WARNING, 
GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d new PCR, without discontinuity signaled, is less than previously received PCR (diff %d us) but not too large, trying to ignore discontinuity\n", hdr.pid, diff_in_us)); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[MPEG-2 TS] PID %d PCR found "LLU" is less than previously received PCR "LLU" (PCR diff %g sec) but no discontinuity signaled\n", hdr.pid, es->program->last_pcr_value, es->program->before_last_pcr_value, (GF_M2TS_MAX_PCR - es->program->before_last_pcr_value + es->program->last_pcr_value) / 27000000.0)); pck.flags = GF_M2TS_PES_PCK_DISCONTINUITY; } } if (pck.flags & GF_M2TS_PES_PCK_DISCONTINUITY) { gf_m2ts_reset_parsers_for_program(ts, es->program); } if (ts->on_event) { ts->on_event(ts, GF_M2TS_EVT_PES_PCR, &pck); } } } /*check for DVB reserved PIDs*/ if (!es) { if (hdr.pid == GF_M2TS_PID_SDT_BAT_ST) { gf_m2ts_gather_section(ts, ts->sdt, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_NIT_ST) { /*ignore them, unused at application level*/ gf_m2ts_gather_section(ts, ts->nit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_EIT_ST_CIT) { /* ignore EIT messages for the moment */ gf_m2ts_gather_section(ts, ts->eit, NULL, &hdr, data, payload_size); return GF_OK; } else if (hdr.pid == GF_M2TS_PID_TDT_TOT_ST) { gf_m2ts_gather_section(ts, ts->tdt_tot, NULL, &hdr, data, payload_size); } else { /* ignore packet */ } } else if (es->flags & GF_M2TS_ES_IS_SECTION) { /* The stream uses sections to carry its payload */ GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; if (ses->sec) gf_m2ts_gather_section(ts, ses->sec, ses, &hdr, data, payload_size); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; /* regular stream using PES packets */ if (pes->reframe && payload_size) gf_m2ts_process_pes(ts, pes, &hdr, data, payload_size, paf); } return GF_OK; } GF_EXPORT GF_Err gf_m2ts_process_data(GF_M2TS_Demuxer *ts, u8 *data, u32 data_size) { GF_Err e=GF_OK; u32 pos, 
pck_size; Bool is_align = 1; if (ts->buffer_size) { //we are sync, copy remaining bytes if ( (ts->buffer[0]==0x47) && (ts->buffer_size<200)) { u32 pck_size = ts->prefix_present ? 192 : 188; if (ts->alloc_size < 200) { ts->alloc_size = 200; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, pck_size - ts->buffer_size); e |= gf_m2ts_process_packet(ts, (unsigned char *)ts->buffer); data += (pck_size - ts->buffer_size); data_size = data_size - (pck_size - ts->buffer_size); } //not sync, copy over the complete buffer else { if (ts->alloc_size < ts->buffer_size+data_size) { ts->alloc_size = ts->buffer_size+data_size; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer + ts->buffer_size, data, sizeof(char)*data_size); ts->buffer_size += data_size; is_align = 0; data = ts->buffer; data_size = ts->buffer_size; } } /*sync input data*/ pos = gf_m2ts_sync(ts, data, data_size, is_align); if (pos==data_size) { if (is_align) { if (ts->alloc_size<data_size) { ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*data_size); ts->alloc_size = data_size; } memcpy(ts->buffer, data, sizeof(char)*data_size); ts->buffer_size = data_size; } return GF_OK; } pck_size = ts->prefix_present ? 
192 : 188; for (;;) { /*wait for a complete packet*/ if (data_size < pos + pck_size) { ts->buffer_size = data_size - pos; data += pos; if (!ts->buffer_size) { return e; } assert(ts->buffer_size<pck_size); if (is_align) { u32 s = ts->buffer_size; if (s<200) s = 200; if (ts->alloc_size < s) { ts->alloc_size = s; ts->buffer = (char*)gf_realloc(ts->buffer, sizeof(char)*ts->alloc_size); } memcpy(ts->buffer, data, sizeof(char)*ts->buffer_size); } else { memmove(ts->buffer, data, sizeof(char)*ts->buffer_size); } return e; } /*process*/ e |= gf_m2ts_process_packet(ts, (unsigned char *)data + pos); pos += pck_size; } return e; } //unused #if 0 GF_ESD *gf_m2ts_get_esd(GF_M2TS_ES *es) { GF_ESD *esd; u32 k, esd_count; esd = NULL; if (es->program->pmt_iod && es->program->pmt_iod->ESDescriptors) { esd_count = gf_list_count(es->program->pmt_iod->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(es->program->pmt_iod->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } if (!esd && es->program->additional_ods) { u32 od_count, od_index; od_count = gf_list_count(es->program->additional_ods); for (od_index = 0; od_index < od_count; od_index++) { GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(es->program->additional_ods, od_index); esd_count = gf_list_count(od->ESDescriptors); for (k = 0; k < esd_count; k++) { GF_ESD *esd_tmp = (GF_ESD *)gf_list_get(od->ESDescriptors, k); if (esd_tmp->ESID != es->mpeg4_es_id) continue; esd = esd_tmp; break; } } } return esd; } void gf_m2ts_set_segment_switch(GF_M2TS_Demuxer *ts) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; es->flags |= GF_M2TS_ES_IGNORE_NEXT_DISCONTINUITY; } } #endif GF_EXPORT void gf_m2ts_reset_parsers_for_program(GF_M2TS_Demuxer *ts, GF_M2TS_Program *prog) { u32 i; for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { GF_M2TS_ES *es = (GF_M2TS_ES *) ts->ess[i]; if (!es) continue; if (prog 
&& (es->program != prog) ) continue; if (es->flags & GF_M2TS_ES_IS_SECTION) { GF_M2TS_SECTION_ES *ses = (GF_M2TS_SECTION_ES *)es; gf_m2ts_section_filter_reset(ses->sec); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; if (!pes || (pes->pid==pes->program->pmt_pid)) continue; pes->cc = -1; pes->frame_state = 0; pes->pck_data_len = 0; if (pes->prev_data) gf_free(pes->prev_data); pes->prev_data = NULL; pes->prev_data_len = 0; pes->PTS = pes->DTS = 0; // pes->prev_PTS = 0; // pes->first_dts = 0; pes->pes_len = pes->pes_end_packet_number = pes->pes_start_packet_number = 0; if (pes->buf) gf_free(pes->buf); pes->buf = NULL; if (pes->temi_tc_desc) gf_free(pes->temi_tc_desc); pes->temi_tc_desc = NULL; pes->temi_tc_desc_len = pes->temi_tc_desc_alloc_size = 0; pes->before_last_pcr_value = pes->before_last_pcr_value_pck_number = 0; pes->last_pcr_value = pes->last_pcr_value_pck_number = 0; if (pes->program->pcr_pid==pes->pid) { pes->program->last_pcr_value = pes->program->last_pcr_value_pck_number = 0; pes->program->before_last_pcr_value = pes->program->before_last_pcr_value_pck_number = 0; } } } } GF_EXPORT void gf_m2ts_reset_parsers(GF_M2TS_Demuxer *ts) { gf_m2ts_reset_parsers_for_program(ts, NULL); ts->pck_number = 0; gf_m2ts_section_filter_reset(ts->cat); gf_m2ts_section_filter_reset(ts->pat); gf_m2ts_section_filter_reset(ts->sdt); gf_m2ts_section_filter_reset(ts->nit); gf_m2ts_section_filter_reset(ts->eit); gf_m2ts_section_filter_reset(ts->tdt_tot); } #if 0 //unused u32 gf_m2ts_pes_get_framing_mode(GF_M2TS_PES *pes) { if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if ( ((GF_M2TS_SECTION_ES *)pes)->sec->process_section == NULL) return GF_M2TS_PES_FRAMING_DEFAULT; } return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; } if (!pes->reframe ) return GF_M2TS_PES_FRAMING_SKIP_NO_RESET; if (pes->reframe == gf_m2ts_reframe_default) return GF_M2TS_PES_FRAMING_RAW; if (pes->reframe == gf_m2ts_reframe_reset) return GF_M2TS_PES_FRAMING_SKIP; return 
GF_M2TS_PES_FRAMING_DEFAULT; } #endif GF_EXPORT GF_Err gf_m2ts_set_pes_framing(GF_M2TS_PES *pes, u32 mode) { if (!pes) return GF_BAD_PARAM; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Setting pes framing mode of PID %d to %d\n", pes->pid, mode) ); /*ignore request for section PIDs*/ if (pes->flags & GF_M2TS_ES_IS_SECTION) { if (pes->flags & GF_M2TS_ES_IS_SL) { if (mode==GF_M2TS_PES_FRAMING_DEFAULT) { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = gf_m2ts_process_mpeg4section; } else { ((GF_M2TS_SECTION_ES *)pes)->sec->process_section = NULL; } } return GF_OK; } if (pes->pid==pes->program->pmt_pid) return GF_BAD_PARAM; //if component reuse, disable previous pes if ((mode > GF_M2TS_PES_FRAMING_SKIP) && (pes->program->ts->ess[pes->pid] != (GF_M2TS_ES *) pes)) { GF_M2TS_PES *o_pes = (GF_M2TS_PES *) pes->program->ts->ess[pes->pid]; if (o_pes->flags & GF_M2TS_ES_IS_PES) gf_m2ts_set_pes_framing(o_pes, GF_M2TS_PES_FRAMING_SKIP); GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[MPEG-2 TS] Reassinging PID %d from program %d to program %d\n", pes->pid, o_pes->program->number, pes->program->number) ); pes->program->ts->ess[pes->pid] = (GF_M2TS_ES *) pes; } switch (mode) { case GF_M2TS_PES_FRAMING_RAW: pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PES_FRAMING_SKIP: pes->reframe = gf_m2ts_reframe_reset; break; case GF_M2TS_PES_FRAMING_SKIP_NO_RESET: pes->reframe = NULL; break; case GF_M2TS_PES_FRAMING_DEFAULT: default: switch (pes->stream_type) { case GF_M2TS_VIDEO_MPEG1: case GF_M2TS_VIDEO_MPEG2: case GF_M2TS_VIDEO_H264: case GF_M2TS_VIDEO_SVC: case GF_M2TS_VIDEO_HEVC: case GF_M2TS_VIDEO_HEVC_TEMPORAL: case GF_M2TS_VIDEO_HEVC_MCTS: case GF_M2TS_VIDEO_SHVC: case GF_M2TS_VIDEO_SHVC_TEMPORAL: case GF_M2TS_VIDEO_MHVC: case GF_M2TS_VIDEO_MHVC_TEMPORAL: case GF_M2TS_AUDIO_MPEG1: case GF_M2TS_AUDIO_MPEG2: case GF_M2TS_AUDIO_AAC: case GF_M2TS_AUDIO_LATM_AAC: case GF_M2TS_AUDIO_AC3: case GF_M2TS_AUDIO_EC3: //for all our supported codec types, use a reframer filter 
pes->reframe = gf_m2ts_reframe_default; break; case GF_M2TS_PRIVATE_DATA: /* TODO: handle DVB subtitle streams */ break; case GF_M2TS_METADATA_ID3_HLS: //TODO pes->reframe = gf_m2ts_reframe_id3_pes; break; default: pes->reframe = gf_m2ts_reframe_default; break; } break; } return GF_OK; } GF_EXPORT GF_M2TS_Demuxer *gf_m2ts_demux_new() { GF_M2TS_Demuxer *ts; GF_SAFEALLOC(ts, GF_M2TS_Demuxer); if (!ts) return NULL; ts->programs = gf_list_new(); ts->SDTs = gf_list_new(); ts->pat = gf_m2ts_section_filter_new(gf_m2ts_process_pat, 0); ts->cat = gf_m2ts_section_filter_new(gf_m2ts_process_cat, 0); ts->sdt = gf_m2ts_section_filter_new(gf_m2ts_process_sdt, 1); ts->nit = gf_m2ts_section_filter_new(gf_m2ts_process_nit, 0); ts->eit = gf_m2ts_section_filter_new(NULL/*gf_m2ts_process_eit*/, 1); ts->tdt_tot = gf_m2ts_section_filter_new(gf_m2ts_process_tdt_tot, 1); #ifdef GPAC_ENABLE_MPE gf_dvb_mpe_init(ts); #endif ts->nb_prog_pmt_received = 0; ts->ChannelAppList = gf_list_new(); return ts; } GF_EXPORT void gf_m2ts_demux_dmscc_init(GF_M2TS_Demuxer *ts) { char temp_dir[GF_MAX_PATH]; u32 length; GF_Err e; ts->dsmcc_controler = gf_list_new(); ts->process_dmscc = 1; strcpy(temp_dir, gf_get_default_cache_directory() ); length = (u32) strlen(temp_dir); if(temp_dir[length-1] == GF_PATH_SEPARATOR) { temp_dir[length-1] = 0; } ts->dsmcc_root_dir = (char*)gf_calloc(strlen(temp_dir)+strlen("CarouselData")+2,sizeof(char)); sprintf(ts->dsmcc_root_dir,"%s%cCarouselData",temp_dir,GF_PATH_SEPARATOR); e = gf_mkdir(ts->dsmcc_root_dir); if(e) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[Process DSMCC] Error during the creation of the directory %s \n",ts->dsmcc_root_dir)); } } GF_EXPORT void gf_m2ts_demux_del(GF_M2TS_Demuxer *ts) { u32 i; if (ts->pat) gf_m2ts_section_filter_del(ts->pat); if (ts->cat) gf_m2ts_section_filter_del(ts->cat); if (ts->sdt) gf_m2ts_section_filter_del(ts->sdt); if (ts->nit) gf_m2ts_section_filter_del(ts->nit); if (ts->eit) gf_m2ts_section_filter_del(ts->eit); if (ts->tdt_tot) 
gf_m2ts_section_filter_del(ts->tdt_tot); for (i=0; i<GF_M2TS_MAX_STREAMS; i++) { //bacause of pure PCR streams, en ES might be reassigned on 2 PIDs, one for the ES and one for the PCR if (ts->ess[i] && (ts->ess[i]->pid==i)) gf_m2ts_es_del(ts->ess[i], ts); } if (ts->buffer) gf_free(ts->buffer); while (gf_list_count(ts->programs)) { GF_M2TS_Program *p = (GF_M2TS_Program *)gf_list_last(ts->programs); gf_list_rem_last(ts->programs); gf_list_del(p->streams); /*reset OD list*/ if (p->additional_ods) { gf_odf_desc_list_del(p->additional_ods); gf_list_del(p->additional_ods); } if (p->pmt_iod) gf_odf_desc_del((GF_Descriptor *)p->pmt_iod); if (p->metadata_pointer_descriptor) gf_m2ts_metadata_pointer_descriptor_del(p->metadata_pointer_descriptor); gf_free(p); } gf_list_del(ts->programs); if (ts->TDT_time) gf_free(ts->TDT_time); gf_m2ts_reset_sdt(ts); if (ts->tdt_tot) gf_list_del(ts->SDTs); #ifdef GPAC_ENABLE_MPE gf_dvb_mpe_shutdown(ts); #endif if (ts->dsmcc_controler) { if (gf_list_count(ts->dsmcc_controler)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_DSMCC_OVERLORD* dsmcc_overlord = (GF_M2TS_DSMCC_OVERLORD*)gf_list_get(ts->dsmcc_controler,0); gf_cleanup_dir(dsmcc_overlord->root_dir); gf_rmdir(dsmcc_overlord->root_dir); gf_m2ts_delete_dsmcc_overlord(dsmcc_overlord); if(ts->dsmcc_root_dir) { gf_free(ts->dsmcc_root_dir); } #endif } gf_list_del(ts->dsmcc_controler); } while(gf_list_count(ts->ChannelAppList)) { #ifdef GPAC_ENABLE_DSMCC GF_M2TS_CHANNEL_APPLICATION_INFO* ChanAppInfo = (GF_M2TS_CHANNEL_APPLICATION_INFO*)gf_list_get(ts->ChannelAppList,0); gf_m2ts_delete_channel_application_info(ChanAppInfo); gf_list_rem(ts->ChannelAppList,0); #endif } gf_list_del(ts->ChannelAppList); if (ts->dsmcc_root_dir) gf_free(ts->dsmcc_root_dir); gf_free(ts); } #if 0//unused void gf_m2ts_print_info(GF_M2TS_Demuxer *ts) { #ifdef GPAC_ENABLE_MPE gf_m2ts_print_mpe_info(ts); #endif } #endif #define M2TS_PROBE_SIZE 188000 static Bool gf_m2ts_probe_buffer(char *buf, u32 size) { GF_Err e; GF_M2TS_Demuxer *ts; 
u32 lt; lt = gf_log_get_tool_level(GF_LOG_CONTAINER); gf_log_set_tool_level(GF_LOG_CONTAINER, GF_LOG_QUIET); ts = gf_m2ts_demux_new(); e = gf_m2ts_process_data(ts, buf, size); if (!ts->pck_number) e = GF_BAD_PARAM; gf_m2ts_demux_del(ts); gf_log_set_tool_level(GF_LOG_CONTAINER, lt); if (e) return GF_FALSE; return GF_TRUE; } GF_EXPORT Bool gf_m2ts_probe_file(const char *fileName) { char buf[M2TS_PROBE_SIZE]; u32 size; FILE *t; if (!strncmp(fileName, "gmem://", 7)) { u8 *mem_address; if (gf_blob_get_data(fileName, &mem_address, &size) != GF_OK) { return GF_FALSE; } if (size>M2TS_PROBE_SIZE) size = M2TS_PROBE_SIZE; memcpy(buf, mem_address, size); } else { t = gf_fopen(fileName, "rb"); if (!t) return 0; size = (u32) fread(buf, 1, M2TS_PROBE_SIZE, t); gf_fclose(t); if ((s32) size <= 0) return 0; } return gf_m2ts_probe_buffer(buf, size); } GF_EXPORT Bool gf_m2ts_probe_data(const u8 *data, u32 size) { size /= 188; size *= 188; return gf_m2ts_probe_buffer((char *) data, size); } static void rewrite_pts_dts(unsigned char *ptr, u64 TS) { ptr[0] &= 0xf1; ptr[0] |= (unsigned char)((TS&0x1c0000000ULL)>>29); ptr[1] = (unsigned char)((TS&0x03fc00000ULL)>>22); ptr[2] &= 0x1; ptr[2] |= (unsigned char)((TS&0x0003f8000ULL)>>14); ptr[3] = (unsigned char)((TS&0x000007f80ULL)>>7); ptr[4] &= 0x1; ptr[4] |= (unsigned char)((TS&0x00000007fULL)<<1); assert(((u64)(ptr[0]&0xe)<<29) + ((u64)ptr[1]<<22) + ((u64)(ptr[2]&0xfe)<<14) + ((u64)ptr[3]<<7) + ((ptr[4]&0xfe)>>1) == TS); } #define ADJUST_TIMESTAMP(_TS) \ if (_TS < (u64) -ts_shift) _TS = pcr_mod + _TS + ts_shift; \ else _TS = _TS + ts_shift; \ while (_TS > pcr_mod) _TS -= pcr_mod; \ GF_EXPORT GF_Err gf_m2ts_restamp(u8 *buffer, u32 size, s64 ts_shift, u8 *is_pes) { u32 done = 0; u64 pcr_mod; // if (!ts_shift) return GF_OK; pcr_mod = 0x80000000; pcr_mod*=4; while (done + 188 <= size) { u8 *pesh; u8 *pck; u64 pcr_base=0, pcr_ext=0; u16 pid; u8 adaptation_field, adaptation_field_length; pck = (u8*) buffer+done; if (pck[0]!=0x47) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Invalid sync byte %X\n", pck[0])); return GF_NON_COMPLIANT_BITSTREAM; } pid = ((pck[1] & 0x1f) <<8 ) + pck[2]; adaptation_field_length = 0; adaptation_field = (pck[3] >> 4) & 0x3; if ((adaptation_field==2) || (adaptation_field==3)) { adaptation_field_length = pck[4]; if ( pck[5]&0x10 /*PCR_flag*/) { pcr_base = (((u64)pck[6])<<25) + (pck[7]<<17) + (pck[8]<<9) + (pck[9]<<1) + (pck[10]>>7); pcr_ext = ((pck[10]&1)<<8) + pck[11]; ADJUST_TIMESTAMP(pcr_base); pck[6] = (unsigned char)(0xff&(pcr_base>>25)); pck[7] = (unsigned char)(0xff&(pcr_base>>17)); pck[8] = (unsigned char)(0xff&(pcr_base>>9)); pck[9] = (unsigned char)(0xff&(pcr_base>>1)); pck[10] = (unsigned char)(((0x1&pcr_base)<<7) | 0x7e | ((0x100&pcr_ext)>>8)); if (pcr_ext != ((pck[10]&1)<<8) + pck[11]) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS Restamp] Sanity check failed for PCR restamping\n")); return GF_IO_ERR; } pck[11] = (unsigned char)(0xff&pcr_ext); } /*add adaptation_field_length field*/ adaptation_field_length++; } if (!is_pes[pid] || !(pck[1]&0x40)) { done+=188; continue; } pesh = &pck[4+adaptation_field_length]; if ((pesh[0]==0x00) && (pesh[1]==0x00) && (pesh[2]==0x01)) { Bool has_pts, has_dts; if ((pesh[6]&0xc0)!=0x80) { done+=188; continue; } has_pts = (pesh[7]&0x80); has_dts = has_pts ? (pesh[7]&0x40) : 0; if (has_pts) { u64 PTS; if (((pesh[9]&0xe0)>>4)!=0x2) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES header, PTS decoding: '0010' expected\n", pid)); done+=188; continue; } PTS = gf_m2ts_get_pts(pesh + 9); ADJUST_TIMESTAMP(PTS); rewrite_pts_dts(pesh+9, PTS); } if (has_dts) { u64 DTS = gf_m2ts_get_pts(pesh + 14); ADJUST_TIMESTAMP(DTS); rewrite_pts_dts(pesh+14, DTS); } } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS Restamp] PID %4d: Wrong PES not beginning with start code\n", pid)); } done+=188; } return GF_OK; } #endif /*GPAC_DISABLE_MPEG2TS*/
./CrossVul/dataset_final_sorted/CWE-416/c/bad_1373_0
crossvul-cpp_data_good_3224_1
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/stat.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <linux/cred.h> #include <linux/hash.h> #include <linux/user_namespace.h> #define UCOUNTS_HASHTABLE_BITS 10 static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)]; static DEFINE_SPINLOCK(ucounts_lock); #define ucounts_hashfn(ns, uid) \ hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \ UCOUNTS_HASHTABLE_BITS) #define ucounts_hashentry(ns, uid) \ (ucounts_hashtable + ucounts_hashfn(ns, uid)) #ifdef CONFIG_SYSCTL static struct ctl_table_set * set_lookup(struct ctl_table_root *root) { return &current_user_ns()->set; } static int set_is_seen(struct ctl_table_set *set) { return &current_user_ns()->set == set; } static int set_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct user_namespace *user_ns = container_of(head->set, struct user_namespace, set); int mode; /* Allow users with CAP_SYS_RESOURCE unrestrained access */ if (ns_capable(user_ns, CAP_SYS_RESOURCE)) mode = (table->mode & S_IRWXU) >> 6; else /* Allow all others at most read-only access */ mode = table->mode & S_IROTH; return (mode << 6) | (mode << 3) | mode; } static struct ctl_table_root set_root = { .lookup = set_lookup, .permissions = set_permissions, }; static int zero = 0; static int int_max = INT_MAX; #define UCOUNT_ENTRY(name) \ { \ .procname = name, \ .maxlen = sizeof(int), \ .mode = 0644, \ .proc_handler = proc_dointvec_minmax, \ .extra1 = &zero, \ .extra2 = &int_max, \ } static struct ctl_table user_table[] = { UCOUNT_ENTRY("max_user_namespaces"), UCOUNT_ENTRY("max_pid_namespaces"), UCOUNT_ENTRY("max_uts_namespaces"), UCOUNT_ENTRY("max_ipc_namespaces"), UCOUNT_ENTRY("max_net_namespaces"), UCOUNT_ENTRY("max_mnt_namespaces"), 
UCOUNT_ENTRY("max_cgroup_namespaces"), #ifdef CONFIG_INOTIFY_USER UCOUNT_ENTRY("max_inotify_instances"), UCOUNT_ENTRY("max_inotify_watches"), #endif { } }; #endif /* CONFIG_SYSCTL */ bool setup_userns_sysctls(struct user_namespace *ns) { #ifdef CONFIG_SYSCTL struct ctl_table *tbl; setup_sysctl_set(&ns->set, &set_root, set_is_seen); tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL); if (tbl) { int i; for (i = 0; i < UCOUNT_COUNTS; i++) { tbl[i].data = &ns->ucount_max[i]; } ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl); } if (!ns->sysctls) { kfree(tbl); retire_sysctl_set(&ns->set); return false; } #endif return true; } void retire_userns_sysctls(struct user_namespace *ns) { #ifdef CONFIG_SYSCTL struct ctl_table *tbl; tbl = ns->sysctls->ctl_table_arg; unregister_sysctl_table(ns->sysctls); retire_sysctl_set(&ns->set); kfree(tbl); #endif } static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent) { struct ucounts *ucounts; hlist_for_each_entry(ucounts, hashent, node) { if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns)) return ucounts; } return NULL; } static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); struct ucounts *ucounts, *new; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); if (!ucounts) { spin_unlock_irq(&ucounts_lock); new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; new->ns = ns; new->uid = uid; new->count = 0; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); if (ucounts) { kfree(new); } else { hlist_add_head(&new->node, hashent); ucounts = new; } } if (ucounts->count == INT_MAX) ucounts = NULL; else ucounts->count += 1; spin_unlock_irq(&ucounts_lock); return ucounts; } static void put_ucounts(struct ucounts *ucounts) { unsigned long flags; spin_lock_irqsave(&ucounts_lock, flags); ucounts->count -= 1; if (!ucounts->count) 
hlist_del_init(&ucounts->node); else ucounts = NULL; spin_unlock_irqrestore(&ucounts_lock, flags); kfree(ucounts); } static inline bool atomic_inc_below(atomic_t *v, int u) { int c, old; c = atomic_read(v); for (;;) { if (unlikely(c >= u)) return false; old = atomic_cmpxchg(v, c, c+1); if (likely(old == c)) return true; c = old; } } struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type) { struct ucounts *ucounts, *iter, *bad; struct user_namespace *tns; ucounts = get_ucounts(ns, uid); for (iter = ucounts; iter; iter = tns->ucounts) { int max; tns = iter->ns; max = READ_ONCE(tns->ucount_max[type]); if (!atomic_inc_below(&iter->ucount[type], max)) goto fail; } return ucounts; fail: bad = iter; for (iter = ucounts; iter != bad; iter = iter->ns->ucounts) atomic_dec(&iter->ucount[type]); put_ucounts(ucounts); return NULL; } void dec_ucount(struct ucounts *ucounts, enum ucount_type type) { struct ucounts *iter; for (iter = ucounts; iter; iter = iter->ns->ucounts) { int dec = atomic_dec_if_positive(&iter->ucount[type]); WARN_ON_ONCE(dec < 0); } put_ucounts(ucounts); } static __init int user_namespace_sysctl_init(void) { #ifdef CONFIG_SYSCTL static struct ctl_table_header *user_header; static struct ctl_table empty[1]; /* * It is necessary to register the user directory in the * default set so that registrations in the child sets work * properly. */ user_header = register_sysctl("user", empty); kmemleak_ignore(user_header); BUG_ON(!user_header); BUG_ON(!setup_userns_sysctls(&init_user_ns)); #endif return 0; } subsys_initcall(user_namespace_sysctl_init);
./CrossVul/dataset_final_sorted/CWE-416/c/good_3224_1
crossvul-cpp_data_good_819_1
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/pipe.c * * Copyright (C) 1991, 1992, 1999 Linus Torvalds */ #include <linux/mm.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/log2.h> #include <linux/mount.h> #include <linux/magic.h> #include <linux/pipe_fs_i.h> #include <linux/uio.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/audit.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <linux/memcontrol.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include "internal.h" /* * The max size that a non-root user is allowed to grow the pipe. Can * be set by root in /proc/sys/fs/pipe-max-size */ unsigned int pipe_max_size = 1048576; /* Maximum allocatable pages per user. Hard limit is unset by default, soft * matches default values. */ unsigned long pipe_user_pages_hard; unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR; /* * We use a start+len construction, which provides full use of the * allocated memory. * -- Florian Coosmann (FGC) * * Reads with count = 0 should always return 0. * -- Julian Bradfield 1999-06-07. * * FIFOs and Pipes now generate SIGIO for both readers and writers. 
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16 * * pipe_read & write cleanup * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 */ static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass) { if (pipe->files) mutex_lock_nested(&pipe->mutex, subclass); } void pipe_lock(struct pipe_inode_info *pipe) { /* * pipe_lock() nests non-pipe inode locks (for writing to a file) */ pipe_lock_nested(pipe, I_MUTEX_PARENT); } EXPORT_SYMBOL(pipe_lock); void pipe_unlock(struct pipe_inode_info *pipe) { if (pipe->files) mutex_unlock(&pipe->mutex); } EXPORT_SYMBOL(pipe_unlock); static inline void __pipe_lock(struct pipe_inode_info *pipe) { mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT); } static inline void __pipe_unlock(struct pipe_inode_info *pipe) { mutex_unlock(&pipe->mutex); } void pipe_double_lock(struct pipe_inode_info *pipe1, struct pipe_inode_info *pipe2) { BUG_ON(pipe1 == pipe2); if (pipe1 < pipe2) { pipe_lock_nested(pipe1, I_MUTEX_PARENT); pipe_lock_nested(pipe2, I_MUTEX_CHILD); } else { pipe_lock_nested(pipe2, I_MUTEX_PARENT); pipe_lock_nested(pipe1, I_MUTEX_CHILD); } } /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe) { DEFINE_WAIT(wait); /* * Pipes are system-local resources, so sleeping on them * is considered a noninteractive wait: */ prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); pipe_unlock(pipe); schedule(); finish_wait(&pipe->wait, &wait); pipe_lock(pipe); } static void anon_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * If nobody else uses this page, and we don't already have a * temporary page, let's keep track of it as a one-deep * allocation cache. 
(Otherwise just release our reference to it) */ if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else put_page(page); } static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; if (page_count(page) == 1) { if (memcg_kmem_enabled()) memcg_kmem_uncharge(page, 0); __SetPageLocked(page); return 0; } return 1; } /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal * * Description: * This function attempts to steal the &struct page attached to * @buf. If successful, this function returns 0 and returns with * the page locked. The caller may then reuse the page for whatever * he wishes; the typical use is insertion into a different file * page cache. */ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * A reference of one is golden, that means that the owner of this * page is the only one holding a reference to it. lock the page * and return OK. */ if (page_count(page) == 1) { lock_page(page); return 0; } return 1; } EXPORT_SYMBOL(generic_pipe_buf_steal); /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to * * Description: * This function grabs an extra reference to @buf. It's used in * in the tee() system call, when we duplicate the buffers in one * pipe into another. */ bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return try_get_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_get); /** * generic_pipe_buf_confirm - verify contents of the pipe buffer * @info: the pipe that the buffer belongs to * @buf: the buffer to confirm * * Description: * This function does nothing, because the generic pipe code uses * pages that are always good when inserted into the pipe. 
*/ int generic_pipe_buf_confirm(struct pipe_inode_info *info, struct pipe_buffer *buf) { return 0; } EXPORT_SYMBOL(generic_pipe_buf_confirm); /** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to * * Description: * This function releases a reference to @buf. */ void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { put_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_release); static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; static const struct pipe_buf_operations packet_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; static ssize_t pipe_read(struct kiocb *iocb, struct iov_iter *to) { size_t total_len = iov_iter_count(to); struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; int do_wakeup; ssize_t ret; /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; __pipe_lock(pipe); for (;;) { int bufs = pipe->nrbufs; if (bufs) { int curbuf = pipe->curbuf; struct pipe_buffer *buf = pipe->bufs + curbuf; size_t chars = buf->len; size_t written; int error; if (chars > total_len) chars = total_len; error = pipe_buf_confirm(pipe, buf); if (error) { if (!ret) ret = error; break; } written = copy_page_to_iter(buf->page, buf->offset, chars, to); if (unlikely(written < chars)) { if (!ret) ret = -EFAULT; break; } ret += chars; buf->offset += chars; buf->len -= chars; /* Was it a packet buffer? 
Clean up and exit */ if (buf->flags & PIPE_BUF_FLAG_PACKET) { total_len = chars; buf->len = 0; } if (!buf->len) { pipe_buf_release(pipe, buf); curbuf = (curbuf + 1) & (pipe->buffers - 1); pipe->curbuf = curbuf; pipe->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? */ continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); } __pipe_unlock(pipe); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; } static inline int is_packetized(struct file *file) { return (file->f_flags & O_DIRECT) != 0; } static ssize_t pipe_write(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; ssize_t ret = 0; int do_wakeup = 0; size_t total_len = iov_iter_count(from); ssize_t chars; /* Null write succeeds. 
*/ if (unlikely(total_len == 0)) return 0; __pipe_lock(pipe); if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } /* We try to merge small writes */ chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ if (pipe->nrbufs && chars != 0) { int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + lastbuf; int offset = buf->offset + buf->len; if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) { ret = pipe_buf_confirm(pipe, buf); if (ret) goto out; ret = copy_page_from_iter(buf->page, offset, chars, from); if (unlikely(ret < chars)) { ret = -EFAULT; goto out; } do_wakeup = 1; buf->len += ret; if (!iov_iter_count(from)) goto out; } } for (;;) { int bufs; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } bufs = pipe->nrbufs; if (bufs < pipe->buffers) { int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1); struct pipe_buffer *buf = pipe->bufs + newbuf; struct page *page = pipe->tmp_page; int copied; if (!page) { page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT); if (unlikely(!page)) { ret = ret ? : -ENOMEM; break; } pipe->tmp_page = page; } /* Always wake up, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. * FIXME! Is this really true? 
*/ do_wakeup = 1; copied = copy_page_from_iter(page, 0, PAGE_SIZE, from); if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) { if (!ret) ret = -EFAULT; break; } ret += copied; /* Insert it into the buffer array */ buf->page = page; buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = copied; buf->flags = 0; if (is_packetized(filp)) { buf->ops = &packet_pipe_buf_ops; buf->flags = PIPE_BUF_FLAG_PACKET; } pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; if (!iov_iter_count(from)) break; } if (bufs < pipe->buffers) continue; if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } out: __pipe_unlock(pipe); if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { int err = file_update_time(filp); if (err) ret = err; sb_end_write(file_inode(filp)->i_sb); } return ret; } static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe = filp->private_data; int count, buf, nrbufs; switch (cmd) { case FIONREAD: __pipe_lock(pipe); count = 0; buf = pipe->curbuf; nrbufs = pipe->nrbufs; while (--nrbufs >= 0) { count += pipe->bufs[buf].len; buf = (buf+1) & (pipe->buffers - 1); } __pipe_unlock(pipe); return put_user(count, (int __user *)arg); default: return -ENOIOCTLCMD; } } /* No kernel lock held - fine */ static __poll_t pipe_poll(struct file *filp, poll_table *wait) { __poll_t mask; struct pipe_inode_info *pipe = filp->private_data; int nrbufs; poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. 
*/ nrbufs = pipe->nrbufs; mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) mask |= EPOLLHUP; } if (filp->f_mode & FMODE_WRITE) { mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0; /* * Most Unices do not set EPOLLERR for FIFOs but on Linux they * behave exactly like pipes for poll(). */ if (!pipe->readers) mask |= EPOLLERR; } return mask; } static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe) { int kill = 0; spin_lock(&inode->i_lock); if (!--pipe->files) { inode->i_pipe = NULL; kill = 1; } spin_unlock(&inode->i_lock); if (kill) free_pipe_info(pipe); } static int pipe_release(struct inode *inode, struct file *file) { struct pipe_inode_info *pipe = file->private_data; __pipe_lock(pipe); if (file->f_mode & FMODE_READ) pipe->readers--; if (file->f_mode & FMODE_WRITE) pipe->writers--; if (pipe->readers || pipe->writers) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } __pipe_unlock(pipe); put_pipe_info(inode, pipe); return 0; } static int pipe_fasync(int fd, struct file *filp, int on) { struct pipe_inode_info *pipe = filp->private_data; int retval = 0; __pipe_lock(pipe); if (filp->f_mode & FMODE_READ) retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); if ((filp->f_mode & FMODE_WRITE) && retval >= 0) { retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); if (retval < 0 && (filp->f_mode & FMODE_READ)) /* this can happen only if on == T */ fasync_helper(-1, filp, 0, &pipe->fasync_readers); } __pipe_unlock(pipe); return retval; } static unsigned long account_pipe_buffers(struct user_struct *user, unsigned long old, unsigned long new) { return atomic_long_add_return(new - old, &user->pipe_bufs); } static bool too_many_pipe_buffers_soft(unsigned long 
user_bufs) { unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft); return soft_limit && user_bufs > soft_limit; } static bool too_many_pipe_buffers_hard(unsigned long user_bufs) { unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard); return hard_limit && user_bufs > hard_limit; } static bool is_unprivileged_user(void) { return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); } struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; unsigned long pipe_bufs = PIPE_DEF_BUFFERS; struct user_struct *user = get_current_user(); unsigned long user_bufs; unsigned int max_size = READ_ONCE(pipe_max_size); pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); if (pipe == NULL) goto out_free_uid; if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE)) pipe_bufs = max_size >> PAGE_SHIFT; user_bufs = account_pipe_buffers(user, 0, pipe_bufs); if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { user_bufs = account_pipe_buffers(user, pipe_bufs, 1); pipe_bufs = 1; } if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) goto out_revert_acct; pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), GFP_KERNEL_ACCOUNT); if (pipe->bufs) { init_waitqueue_head(&pipe->wait); pipe->r_counter = pipe->w_counter = 1; pipe->buffers = pipe_bufs; pipe->user = user; mutex_init(&pipe->mutex); return pipe; } out_revert_acct: (void) account_pipe_buffers(user, pipe_bufs, 0); kfree(pipe); out_free_uid: free_uid(user); return NULL; } void free_pipe_info(struct pipe_inode_info *pipe) { int i; (void) account_pipe_buffers(pipe->user, pipe->buffers, 0); free_uid(pipe->user); for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) pipe_buf_release(pipe, buf); } if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe->bufs); kfree(pipe); } static struct vfsmount *pipe_mnt __read_mostly; /* * pipefs_dname() is called from d_path(). 
*/ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", d_inode(dentry)->i_ino); } static const struct dentry_operations pipefs_dentry_operations = { .d_dname = pipefs_dname, }; static struct inode * get_pipe_inode(void) { struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb); struct pipe_inode_info *pipe; if (!inode) goto fail_inode; inode->i_ino = get_next_ino(); pipe = alloc_pipe_info(); if (!pipe) goto fail_iput; inode->i_pipe = pipe; pipe->files = 2; pipe->readers = pipe->writers = 1; inode->i_fop = &pipefifo_fops; /* * Mark the inode dirty from the very beginning, * that way it will never be moved to the dirty * list because "mark_inode_dirty()" will think * that it already _is_ on the dirty list. */ inode->i_state = I_DIRTY; inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); return inode; fail_iput: iput(inode); fail_inode: return NULL; } int create_pipe_files(struct file **res, int flags) { struct inode *inode = get_pipe_inode(); struct file *f; if (!inode) return -ENFILE; f = alloc_file_pseudo(inode, pipe_mnt, "", O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)), &pipefifo_fops); if (IS_ERR(f)) { free_pipe_info(inode->i_pipe); iput(inode); return PTR_ERR(f); } f->private_data = inode->i_pipe; res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK), &pipefifo_fops); if (IS_ERR(res[0])) { put_pipe_info(inode, inode->i_pipe); fput(f); return PTR_ERR(res[0]); } res[0]->private_data = inode->i_pipe; res[1] = f; return 0; } static int __do_pipe_flags(int *fd, struct file **files, int flags) { int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; error = create_pipe_files(files, flags); if (error) return error; error = get_unused_fd_flags(flags); if (error < 0) goto err_read_pipe; fdr = error; error = 
get_unused_fd_flags(flags); if (error < 0) goto err_fdr; fdw = error; audit_fd_pair(fdr, fdw); fd[0] = fdr; fd[1] = fdw; return 0; err_fdr: put_unused_fd(fdr); err_read_pipe: fput(files[0]); fput(files[1]); return error; } int do_pipe_flags(int *fd, int flags) { struct file *files[2]; int error = __do_pipe_flags(fd, files, flags); if (!error) { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } return error; } /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. */ static int do_pipe2(int __user *fildes, int flags) { struct file *files[2]; int fd[2]; int error; error = __do_pipe_flags(fd, files, flags); if (!error) { if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) { fput(files[0]); fput(files[1]); put_unused_fd(fd[0]); put_unused_fd(fd[1]); error = -EFAULT; } else { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } } return error; } SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags) { return do_pipe2(fildes, flags); } SYSCALL_DEFINE1(pipe, int __user *, fildes) { return do_pipe2(fildes, 0); } static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(pipe); if (signal_pending(current)) break; } return cur == *cnt ? 
-ERESTARTSYS : 0; } static void wake_up_partner(struct pipe_inode_info *pipe) { wake_up_interruptible(&pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; int ret; filp->f_version = 0; spin_lock(&inode->i_lock); if (inode->i_pipe) { pipe = inode->i_pipe; pipe->files++; spin_unlock(&inode->i_lock); } else { spin_unlock(&inode->i_lock); pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; pipe->files = 1; spin_lock(&inode->i_lock); if (unlikely(inode->i_pipe)) { inode->i_pipe->files++; spin_unlock(&inode->i_lock); free_pipe_info(pipe); pipe = inode->i_pipe; } else { inode->i_pipe = pipe; spin_unlock(&inode->i_lock); } } filp->private_data = pipe; /* OK, we have a pipe and it's pinned down */ __pipe_lock(pipe); /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(pipe); if (!is_pipe && !pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress EPOLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { if (wait_for_partner(pipe, &pipe->w_counter)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. */ ret = -ENXIO; if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(pipe); if (!is_pipe && !pipe->readers) { if (wait_for_partner(pipe, &pipe->r_counter)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. 
* This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(pipe); break; default: ret = -EINVAL; goto err; } /* Ok! */ __pipe_unlock(pipe); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: __pipe_unlock(pipe); put_pipe_info(inode, pipe); return ret; } const struct file_operations pipefifo_fops = { .open = fifo_open, .llseek = no_llseek, .read_iter = pipe_read, .write_iter = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .release = pipe_release, .fasync = pipe_fasync, }; /* * Currently we rely on the pipe array holding a power-of-2 number * of pages. Returns 0 on error. */ unsigned int round_pipe_size(unsigned long size) { if (size > (1U << 31)) return 0; /* Minimum pipe size, as required by POSIX */ if (size < PAGE_SIZE) return PAGE_SIZE; return roundup_pow_of_two(size); } /* * Allocate a new array of pipe buffers and copy the info over. Returns the * pipe size if successful, or return -ERROR on error. */ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) { struct pipe_buffer *bufs; unsigned int size, nr_pages; unsigned long user_bufs; long ret = 0; size = round_pipe_size(arg); nr_pages = size >> PAGE_SHIFT; if (!nr_pages) return -EINVAL; /* * If trying to increase the pipe capacity, check that an * unprivileged user is not trying to exceed various limits * (soft limit check here, hard limit check just below). * Decreasing the pipe capacity is always permitted, even * if the user is currently over a limit. 
 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/* Charge the new size to the owning user up front; reverted below
	 * on any failure path (out_revert_acct). */
	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing.  If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		/* tail = number of occupied slots that wrapped past the end
		 * of the old (power-of-two sized) ring; 0 if none wrapped. */
		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		/* head = occupied slots between curbuf and the old ring end. */
		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf,
			       head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs,
			       tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	/* Re-account at the old size (arguments intentionally swapped). */
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 *
 * Instead, identify a pipe by its file_operations pointer; returns the
 * pipe_inode_info or NULL if @file is not a pipe/FIFO.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ?
		/* ternary tail of get_pipe_info(): private_data is the pipe */
		file->private_data : NULL;
}

/*
 * fcntl() backend for the pipe-specific commands.  Returns the new/current
 * capacity in bytes for F_SETPIPE_SZ/F_GETPIPE_SZ, -EBADF if @file is not
 * a pipe, -EINVAL for unknown commands.
 */
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	/* Serialize against readers/writers and concurrent resizes. */
	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

/* Minimal super_operations: pipefs inodes need no RCU-delayed freeing. */
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

/*
 * Register pipefs and create the internal kernel mount that backs all
 * anonymous pipes.  Unregisters the filesystem again if the mount fails.
 */
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);
./CrossVul/dataset_final_sorted/CWE-416/c/good_819_1
crossvul-cpp_data_good_5021_9
/* * IPv6 Syncookies implementation for the Linux kernel * * Authors: * Glenn Griffin <ggriffin.kernel@gmail.com> * * Based on IPv4 implementation by Andi Kleen * linux/net/ipv4/syncookies.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/tcp.h> #include <linux/random.h> #include <linux/cryptohash.h> #include <linux/kernel.h> #include <net/ipv6.h> #include <net/tcp.h> #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..] * * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows * using higher values than ipv4 tcp syncookies. * The other values are chosen based on ethernet (1500 and 9k MTU), plus * one that accounts for common encap (PPPoe) overhead. Table must be sorted. 
 */
static __u16 const msstab[] = {
	1280 - 60, /* IPV6_MIN_MTU - 60 */
	1480 - 60,
	1500 - 60,
	9000 - 60,
};

/* Per-cpu scratch buffer: 16 input words + 5 digest words + SHA workspace. */
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
		      ipv6_cookie_scratch);

/*
 * Hash the connection 4-tuple plus @count with one of the two per-boot
 * secrets (@c selects which).  Runs a single SHA-1 transform over a
 * per-cpu scratch area; caller context must not migrate mid-call
 * (callers run in softirq/bh context).
 */
static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
		       __be16 sport, __be16 dport, u32 count, int c)
{
	__u32 *tmp;

	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));

	tmp  = this_cpu_ptr(ipv6_cookie_scratch);

	/*
	 * we have 320 bits of information to hash, copy in the remaining
	 * 192 bits required for sha_transform, from the syncookie6_secret
	 * and overwrite the digest with the secret
	 */
	memcpy(tmp + 10, syncookie6_secret[c], 44);
	memcpy(tmp, saddr, 16);
	memcpy(tmp + 4, daddr, 16);
	tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
	tmp[9] = count;
	sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);

	return tmp[17];
}

/*
 * Build the ISN-style cookie: upper COOKIEBITS carry the coarse timestamp
 * (tcp_cookie_time()), lower bits encode @data (the MSS table index) keyed
 * by the second hash.  Inverse of check_tcp_syn_cookie().
 */
static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __be16 sport, __be16 dport, __u32 sseq,
				   __u32 data)
{
	u32 count = tcp_cookie_time();
	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		& COOKIEMASK));
}

/*
 * Validate a returned cookie.  Recovers the embedded timestamp, rejects
 * cookies older than MAX_SYNCOOKIE_AGE by returning (__u32)-1, otherwise
 * returns the encoded data value (MSS table index, possibly out of range
 * for a forged cookie - caller must bounds-check it).
 */
static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
				  const struct in6_addr *daddr, __be16 sport,
				  __be16 dport, __u32 sseq)
{
	__u32 diff, count = tcp_cookie_time();

	/* Strip the first hash and the client's sequence number. */
	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	/* Cookie age in tcp_cookie_time() units, modulo the counter width. */
	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (__u32)-1;

	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;
}

/*
 * Clamp *mssp to the largest msstab[] entry not exceeding it (falls back to
 * index 0 for tiny values) and return the SYN cookie to send as our ISN.
 */
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, __u16 *mssp)
{
	int mssind;
	const __u16 mss = *mssp;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
		if (mss >= msstab[mssind])
			break;

	*mssp = msstab[mssind];

	return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
				     th->dest,
				     ntohl(th->seq), mssind);
}
EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);

/* skb-based convenience wrapper around __cookie_v6_init_sequence(). */
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	return __cookie_v6_init_sequence(iph, th, mssp);
}

/*
 * Verify @cookie against the headers of the completing ACK.  Returns the
 * MSS that was encoded in the cookie, or 0 if the cookie is invalid/stale
 * (the out-of-range index check catches forged cookies).
 */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      __u32 cookie)
{
	__u32 seq = ntohl(th->seq) - 1;
	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
					    th->source, th->dest, seq);

	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
EXPORT_SYMBOL_GPL(__cookie_v6_check);

/*
 * Handle the final ACK of a cookie-based handshake: validate the cookie,
 * rebuild the request_sock state that was never stored, and hand off to
 * tcp_get_cookie_sock() to create the child socket.  Returns the (possibly
 * new child) socket, or NULL on failure; @sk is returned unchanged when
 * syncookies are off or the packet is not a candidate ACK.
 */
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tcp_opt;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;
	struct sock *ret = sk;
	struct request_sock *req;
	int mss;
	struct dst_entry *dst;
	__u8 rcv_wscale;

	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

	/* Only accept cookies shortly after an accept-queue overflow. */
	if (tcp_synq_no_recent_overflow(sk))
		goto out;

	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
	if (mss == 0) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(skb, &tcp_opt, 0, NULL);

	if (!cookie_timestamp_decode(&tcp_opt))
		goto out;

	ret = NULL;
	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
	if (!req)
		goto out;

	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->tfo_listener = false;

	if (security_inet_conn_request(sk, skb, req))
		goto out_free;

	/* Reconstruct the connection identity from the ACK's headers. */
	req->mss = mss;
	ireq->ir_rmt_port = th->source;
	ireq->ir_num = ntohs(th->dest);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		/* Keep the skb: the listener wants its IPv6 ancillary data. */
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}

	ireq->ir_iif = sk->sk_bound_dev_if;
	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	ireq->ir_mark = inet_request_mark(sk, skb);

	/* Options recovered from the ACK's timestamp-encoded cookie data. */
	req->num_retrans = 0;
	ireq->snd_wscale	= tcp_opt.snd_wscale;
	ireq->sack_ok		= tcp_opt.sack_ok;
	ireq->wscale_ok		= tcp_opt.wscale_ok;
	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack.v64	= 0;
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;

	/*
	 * We need to lookup the dst_entry to get the correct window size.
	 * This is taken from tcp_v6_syn_recv_sock.  Somebody please enlighten
	 * me if there is a preferred way.
	 */
	{
		struct in6_addr *final_p, final;
		struct flowi6 fl6;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = ireq->ir_mark;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = inet_sk(sk)->inet_sport;
		security_req_classify_flow(req, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out_free;
	}

	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
	tcp_select_initial_window(tcp_full_space(sk), req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;
	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);

	/* Creates the child socket (consumes req/dst on success). */
	ret = tcp_get_cookie_sock(sk, skb, req, dst);
out:
	return ret;
out_free:
	reqsk_free(req);
	return NULL;
}
./CrossVul/dataset_final_sorted/CWE-416/c/good_5021_9
crossvul-cpp_data_good_3175_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * Chetan Loke : Implemented TPACKET_V3 block abstraction * layer. * Copyright (C) 2011, <lokec@ccs.neu.edu> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #include <linux/percpu.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif #include <linux/bpf.h> #include <net/compat.h> #include "internal.h" /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely, that it points to ll header. PPP makes it, that is wrong, because introduce assymetry between rx and tx paths. data -> data Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! 
data -> data Resume If dev->hard_header==NULL we are unlikely to restore sensible ll header. On transmit: ------------ dev->hard_header != NULL mac_header -> ll header data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data We should set nh.raw on output to correct posistion, packet classifier depends on it. */ /* Private packet socket structures. */ /* identical to struct packet_mreq except it has * a longer address field. */ struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[MAX_ADDR_LEN]; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring); #define V3_ALIGNMENT (8) #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) #define PGV_FROM_VMALLOC 1 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) struct packet_sock; static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status); static void packet_increment_head(struct packet_ring_buffer *buff); static int prb_curr_blk_in_use(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, struct packet_sock *); static void 
prb_retire_current_block(struct tpacket_kbdq_core *, struct packet_sock *, unsigned int status); static int prb_queue_frozen(struct tpacket_kbdq_core *); static void prb_open_block(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void prb_retire_rx_blk_timer_expired(unsigned long); static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); static void prb_init_blk_timer(struct packet_sock *, struct tpacket_kbdq_core *, void (*func) (unsigned long)); static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_clear_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_fill_vlan_info(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void packet_flush_mclist(struct sock *sk); struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { /* Trick: alias skb original length with * ll.sll_family and ll.protocol in order * to save room. */ unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; #define vio_le() virtio_legacy_is_little_endian() #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) #define GET_PBLOCK_DESC(x, bid) \ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) #define GET_NEXT_PRB_BLK_NUM(x) \ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? 
\ ((x)->kactive_blk_num+1) : 0) static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; skb = validate_xmit_skb_list(skb, dev); if (skb != orig_skb) goto drop; txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); if (!dev_xmit_complete(ret)) kfree_skb(skb); return ret; drop: atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return NET_XMIT_DROP; } static struct net_device *packet_cached_dev_get(struct packet_sock *po) { struct net_device *dev; rcu_read_lock(); dev = rcu_dereference(po->cached_dev); if (likely(dev)) dev_hold(dev); rcu_read_unlock(); return dev; } static void packet_cached_dev_assign(struct packet_sock *po, struct net_device *dev) { rcu_assign_pointer(po->cached_dev, dev); } static void packet_cached_dev_reset(struct packet_sock *po) { RCU_INIT_POINTER(po->cached_dev, NULL); } static bool packet_use_direct_xmit(const struct packet_sock *po) { return po->xmit == packet_direct_xmit; } static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index; if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb, NULL, __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { queue_index = __packet_pick_tx_queue(dev, skb); } skb_set_queue_mapping(skb, queue_index); } /* 
 register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket is not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	/* Idempotent: a running socket is already hooked into rx. */
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		/* Hold the sock for as long as the hook can deliver to it. */
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.   If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * callers responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	/* Drop the reference taken by register_prot_hook(). */
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

/* Like __unregister_prot_hook() but a no-op if the hook is not active. */
static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

/* Ring pages may come from vmalloc or the linear map; resolve either. */
static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

/*
 * Publish @status to the user-visible frame header for V1/V2 rings.
 * The dcache flush + smp_wmb() make the status change visible to
 * userspace after the frame payload.  V3 uses block status instead
 * and must never reach here.
 */
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

/* Counterpart of __packet_set_status(): read the frame status (V1/V2). */
static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

/*
 * Pick a timestamp for @skb per the socket's tstamp flags.  Prefers the
 * raw hardware timestamp when requested and available, else falls back
 * to the software timestamp.  Returns the matching TP_STATUS_TS_* flag,
 * or 0 if no usable timestamp exists (*ts is then left untouched).
 */
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

/*
 * Write the skb's timestamp into the ring frame header (V1: usec
 * resolution, V2: nsec).  Returns the TP_STATUS_TS_* flag to be OR'd
 * into the frame status, or 0 if there was no timestamp to store.
 */
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

/*
 * Map a logical frame number to its address inside the pg_vec and return
 * it only if the frame is currently in the requested @status state;
 * NULL otherwise (e.g. still owned by userspace).
 */
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

/* Shorthand for looking up the frame at the ring head. */
static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
del_timer_sync(&pkc->retire_blk_timer); } static void prb_shutdown_retire_blk_timer(struct packet_sock *po, struct sk_buff_head *rb_queue) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); spin_lock_bh(&rb_queue->lock); pkc->delete_blk_timer = 1; spin_unlock_bh(&rb_queue->lock); prb_del_retire_blk_timer(pkc); } static void prb_init_blk_timer(struct packet_sock *po, struct tpacket_kbdq_core *pkc, void (*func) (unsigned long)) { init_timer(&pkc->retire_blk_timer); pkc->retire_blk_timer.data = (long)po; pkc->retire_blk_timer.function = func; pkc->retire_blk_timer.expires = jiffies; } static void prb_setup_retire_blk_timer(struct packet_sock *po) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); } static int prb_calc_retire_blk_tmo(struct packet_sock *po, int blk_size_in_bytes) { struct net_device *dev; unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; struct ethtool_link_ksettings ecmd; int err; rtnl_lock(); dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); if (unlikely(!dev)) { rtnl_unlock(); return DEFAULT_PRB_RETIRE_TOV; } err = __ethtool_get_link_ksettings(dev, &ecmd); rtnl_unlock(); if (!err) { /* * If the link speed is so slow you don't really * need to worry about perf anyways */ if (ecmd.base.speed < SPEED_1000 || ecmd.base.speed == SPEED_UNKNOWN) { return DEFAULT_PRB_RETIRE_TOV; } else { msec = 1; div = ecmd.base.speed / 1000; } } mbits = (blk_size_in_bytes * 8) / (1024 * 1024); if (div) mbits /= div; tmo = mbits * msec; if (div) return tmo+1; return tmo; } static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, union tpacket_req_u *req_u) { p1->feature_req_word = req_u->req3.tp_feature_req_word; } static void init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u) { struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd; memset(p1, 0x0, sizeof(*p1)); 
p1->knxt_seq_num = 1; p1->pkbdq = pg_vec; pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; p1->pkblk_start = pg_vec[0].buffer; p1->kblk_size = req_u->req3.tp_block_size; p1->knum_blocks = req_u->req3.tp_block_nr; p1->hdrlen = po->tp_hdrlen; p1->version = po->tp_version; p1->last_kactive_blk_num = 0; po->stats.stats3.tp_freeze_q_cnt = 0; if (req_u->req3.tp_retire_blk_tov) p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; else p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); prb_setup_retire_blk_timer(po); prb_open_block(p1, pbd); } /* Do NOT update the last_blk_num first. * Assumes sk_buff_head lock is held. */ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) { mod_timer(&pkc->retire_blk_timer, jiffies + pkc->tov_in_jiffies); pkc->last_kactive_blk_num = pkc->kactive_blk_num; } /* * Timer logic: * 1) We refresh the timer only when we open a block. * By doing this we don't waste cycles refreshing the timer * on packet-by-packet basis. * * With a 1MB block-size, on a 1Gbps line, it will take * i) ~8 ms to fill a block + ii) memcpy etc. * In this cut we are not accounting for the memcpy time. * * So, if the user sets the 'tmo' to 10ms then the timer * will never fire while the block is still getting filled * (which is what we want). However, the user could choose * to close a block early and that's fine. * * But when the timer does fire, we check whether or not to refresh it. * Since the tmo granularity is in msecs, it is not too expensive * to refresh the timer, lets say every '8' msecs. * Either the user can set the 'tmo' or we can derive it based on * a) line-speed and b) block-size. * prb_calc_retire_blk_tmo() calculates the tmo. 
* */ static void prb_retire_rx_blk_timer_expired(unsigned long data) { struct packet_sock *po = (struct packet_sock *)data; struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; spin_lock(&po->sk.sk_receive_queue.lock); frozen = prb_queue_frozen(pkc); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); if (unlikely(pkc->delete_blk_timer)) goto out; /* We only need to plug the race when the block is partially filled. * tpacket_rcv: * lock(); increment BLOCK_NUM_PKTS; unlock() * copy_bits() is in progress ... * timer fires on other cpu: * we can't retire the current block because copy_bits * is in progress. * */ if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { if (!BLOCK_NUM_PKTS(pbd)) { /* An empty block. Just refresh the timer. */ goto refresh_timer; } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; else goto out; } else { /* Case 1. Queue was frozen because user-space was * lagging behind. */ if (prb_curr_blk_in_use(pkc, pbd)) { /* * Ok, user-space is still behind. * So just refresh the timer. */ goto refresh_timer; } else { /* Case 2. queue was frozen,user-space caught up, * now the link went idle && the timer fired. * We don't have a block to close.So we open this * block and restart the timer. * opening a block thaws the queue,restarts timer * Thawing/timer-refresh is a side effect. 
*/ prb_open_block(pkc, pbd); goto out; } } } refresh_timer: _prb_refresh_rx_retire_blk_timer(pkc); out: spin_unlock(&po->sk.sk_receive_queue.lock); } static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 u8 *start, *end; start = (u8 *)pbd1; /* Skip the block header(we know header WILL fit in 4K) */ start += PAGE_SIZE; end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); for (; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif /* Now update the block status. */ BLOCK_STATUS(pbd1) = status; /* Flush the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 start = (u8 *)pbd1; flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif } /* * Side effect: * * 1) flush the block * 2) Increment active_blk_num * * Note:We DONT refresh the timer on purpose. * Because almost always the next block will be opened. */ static void prb_close_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, struct packet_sock *po, unsigned int stat) { __u32 status = TP_STATUS_USER | stat; struct tpacket3_hdr *last_pkt; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; struct sock *sk = &po->sk; if (po->stats.stats3.tp_drops) status |= TP_STATUS_LOSING; last_pkt = (struct tpacket3_hdr *)pkc1->prev; last_pkt->tp_next_offset = 0; /* Get the ts of the last pkt */ if (BLOCK_NUM_PKTS(pbd1)) { h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { /* Ok, we tmo'd - so get the current time. * * It shouldn't really happen as we don't close empty * blocks. See prb_retire_rx_blk_timer_expired(). 
*/ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; h1->ts_last_pkt.ts_nsec = ts.tv_nsec; } smp_wmb(); /* Flush the block */ prb_flush_block(pkc1, pbd1, status); sk->sk_data_ready(sk); pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } /* * Side effect of opening a block: * * 1) prb_queue is thawed. * 2) retire_blk_timer is refreshed. * */ static void prb_open_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1) { struct timespec ts; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; smp_rmb(); /* We could have just memset this but we will lose the * flexibility of making the priv area sticky */ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; BLOCK_NUM_PKTS(pbd1) = 0; BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); getnstimeofday(&ts); h1->ts_first_pkt.ts_sec = ts.tv_sec; h1->ts_first_pkt.ts_nsec = ts.tv_nsec; pkc1->pkblk_start = (char *)pbd1; pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; pbd1->version = pkc1->version; pkc1->prev = pkc1->nxt_offset; pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; prb_thaw_queue(pkc1); _prb_refresh_rx_retire_blk_timer(pkc1); smp_wmb(); } /* * Queue freeze logic: * 1) Assume tp_block_nr = 8 blocks. * 2) At time 't0', user opens Rx ring. * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 * 4) user-space is either sleeping or processing block '0'. * 5) tpacket_rcv is currently filling block '7', since there is no space left, * it will close block-7,loop around and try to fill block '0'. * call-flow: * __packet_lookup_frame_in_block * prb_retire_current_block() * prb_dispatch_next_block() * |->(BLOCK_STATUS == USER) evaluates to true * 5.1) Since block-0 is currently in-use, we just freeze the queue. 
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires,it will refresh itself so that we can
 *         re-open block-0 in near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	/* Account the freeze so user space can observe back-pressure. */
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

/* Close the currently active block if it is still owned by the kernel.
 * If user space already owns it (TP_STATUS_USER) there is nothing to do.
 */
static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

/* Non-zero when user space still owns the given block. */
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

/* Non-zero while the queue is frozen waiting for user space to catch up. */
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

/* Drop the "fill in progress" reference taken by prb_fill_curr_block(). */
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

/* Copy VLAN tag/TPID from the skb into the V3 packet header, if present. */
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

/* Run all per-packet "feature" fill operations for TPACKET_V3. */
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

/* Reserve 'len' (aligned) bytes at 'curr' inside the current block and
 * update the block accounting.  blk_fill_in_prog is raised until the
 * caller finishes copying the packet data (see prb_clear_blk_fill_status).
 */
static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the
   sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
						int status,
					    unsigned int len
					    )
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available.user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

/* Return a pointer to the slot where the current packet should be stored,
 * or NULL when no space is available.  Dispatches per tp_version.
 */
static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

/* Return block 'idx' iff its status word exactly matches 'status'. */
static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

/* Index of the block preceding the active one, wrapping at block 0. */
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

/* Advance the rx head for V1/V2; V3 advances per-block instead. */
static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ?
		rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

/* Advance the ring head, wrapping from frame_max back to 0. */
static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

/* Sum the per-cpu pending counters (tx ring only).  The sum is only
 * meaningful when no concurrent updates are in flight.
 */
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

/* True when the V1/V2 rx ring still has a kernel-owned frame at head
 * plus an optional 1/2^pow_off look-ahead margin.
 */
static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

/* V3 analogue of __tpacket_has_room, measured in blocks. */
static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ?
					     skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

/* Locked wrapper around __packet_rcv_has_room; also tracks the socket's
 * "pressure" flag (set while the ring is below ROOM_NORMAL).
 */
static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

/* Heuristic: a flow is "huge" if its rxhash was seen in more than half of
 * the recent-history slots.  History is updated with a random slot.
 */
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return
	       prandom_u32_max(num);
}

/* Pick a fanout member with receive room, starting from the remembered
 * rollover position.  When try_self is set, the nominated socket 'idx'
 * keeps the packet unless it is low on room and the flow is huge.
 */
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

/* Run the fanout BPF program (if any) to choose the member index. */
static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

/* Protocol-hook entry for fanout groups: demux the skb to one member
 * socket and hand it to that socket's own receive function.
 */
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	/* Optionally roll the packet over to a less-loaded member. */
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

/* Append sk to the group's member array; num_members is published after
 * the slot is written so readers never see an uninitialized entry.
 */
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

/* Remove sk from the member array by swapping in the last entry. */
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

/* Swap in a new fanout BPF program and destroy the old one after a grace
 * period (readers access f->bpf_prog under rcu_read_lock).
 */
static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
} static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; struct sock_fprog fprog; int ret; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fprog)) return -EINVAL; if (copy_from_user(&fprog, data, len)) return -EFAULT; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) return ret; __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, unsigned int len) { struct bpf_prog *new; u32 fd; if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; if (len != sizeof(fd)) return -EINVAL; if (copy_from_user(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(new)) return PTR_ERR(new); __fanout_set_data_bpf(po->fanout, new); return 0; } static int fanout_set_data(struct packet_sock *po, char __user *data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: return -EINVAL; }; } static void fanout_release_data(struct packet_fanout *f) { switch (f->type) { case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: __fanout_set_data_bpf(f, NULL); }; } static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_rollover *rollover = NULL; struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f, *match; u8 type = type_flags & 0xff; u8 flags = type_flags >> 8; int err; switch (type) { case PACKET_FANOUT_ROLLOVER: if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) return -EINVAL; case PACKET_FANOUT_HASH: case PACKET_FANOUT_LB: case PACKET_FANOUT_CPU: case PACKET_FANOUT_RND: case PACKET_FANOUT_QM: case PACKET_FANOUT_CBPF: case PACKET_FANOUT_EBPF: break; default: return -EINVAL; } mutex_lock(&fanout_mutex); err = -EINVAL; if (!po->running) goto out; err = -EALREADY; if (po->fanout) goto out; if (type == 
PACKET_FANOUT_ROLLOVER || (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { err = -ENOMEM; rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); if (!rollover) goto out; atomic_long_set(&rollover->num, 0); atomic_long_set(&rollover->num_huge, 0); atomic_long_set(&rollover->num_failed, 0); po->rollover = rollover; } match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && read_pnet(&f->net) == sock_net(sk)) { match = f; break; } } err = -EINVAL; if (match && match->flags != flags) goto out; if (!match) { err = -ENOMEM; match = kzalloc(sizeof(*match), GFP_KERNEL); if (!match) goto out; write_pnet(&match->net, sock_net(sk)); match->id = id; match->type = type; match->flags = flags; INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); atomic_set(&match->sk_ref, 0); fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } err = -EINVAL; if (match->type == type && match->prot_hook.type == po->prot_hook.type && match->prot_hook.dev == po->prot_hook.dev) { err = -ENOSPC; if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); po->fanout = match; atomic_inc(&match->sk_ref); __fanout_link(sk, po); err = 0; } } out: if (err && rollover) { kfree(rollover); po->rollover = NULL; } mutex_unlock(&fanout_mutex); return err; } static void fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; mutex_lock(&fanout_mutex); f = po->fanout; if (f) { po->fanout = NULL; if (atomic_dec_and_test(&f->sk_ref)) { list_del(&f->list); dev_remove_pack(&f->prot_hook); fanout_release_data(f); kfree(f); } if (po->rollover) kfree_rcu(po->rollover, rcu); } mutex_unlock(&fanout_mutex); } static bool packet_extra_vlan_len_allowed(const struct net_device 
					  *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

/* Receive hook for SOCK_PACKET sockets: deliver every frame, with the
 * link-layer header pushed back, to the owning socket's receive queue.
 */
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 *	Output a raw packet to a device layer.
 *	This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		/* Allocation may sleep, so drop RCU and look the device up
		 * again afterwards (the "goto retry" below).
		 */
		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

/* Run the socket's attached BPF filter (if any); returns the number of
 * bytes to keep, or 0 to drop.
 */
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

/* Copy a virtio_net header describing skb's offload state into msg. */
static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}

/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU.
 * Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequencially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		/* Must clone before mangling a shared skb's data pointers. */
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
*/ PACKET_SKB_CB(skb)->sa.origlen = skb->len; if (pskb_trim(skb, snaplen)) goto drop_n_acct; skb_set_owner_r(skb, sk); skb->dev = NULL; skb_dst_drop(skb); /* drop conntrack reference */ nf_reset(skb); spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_packets++; sock_skb_set_dropcount(sk, skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); return 0; drop_n_acct: is_drop_n_account = true; spin_lock(&sk->sk_receive_queue.lock); po->stats.stats1.tp_drops++; atomic_inc(&sk->sk_drops); spin_unlock(&sk->sk_receive_queue.lock); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; } static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sock *sk; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; u8 *skb_head = skb->data; int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; unsigned short macoff, netoff, hdrlen; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing * userspace to call getsockopt(..., PACKET_HDRLEN, ...). 
*/ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); if (!net_eq(dev_net(dev), sock_net(sk))) goto drop; if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { /* Special case: outgoing packets have ll header at head */ skb_pull(skb, skb_network_offset(skb)); } } snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) snaplen = res; if (sk->sk_type == SOCK_DGRAM) { macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + po->tp_reserve; } else { unsigned int maclen = skb_network_offset(skb); netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; if (po->has_vnet_hdr) netoff += sizeof(struct virtio_net_hdr); macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { if (skb_shared(skb)) { copy_skb = skb_clone(skb, GFP_ATOMIC); } else { copy_skb = skb_get(skb); skb_head = skb->data; } if (copy_skb) skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) snaplen = 0; } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { u32 nval; nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. 
macoff=%u\n", snaplen, nval, macoff); snaplen = nval; if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; } } spin_lock(&sk->sk_receive_queue.lock); h.raw = packet_current_rx_frame(po, skb, TP_STATUS_KERNEL, (macoff+snaplen)); if (!h.raw) goto drop_n_account; if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); /* * LOSING will be reported till you read the stats, * because it's COR - Clear On Read. * Anyways, moving it for V1/V2 only as V3 doesn't need this * at packet level. */ if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; __skb_queue_tail(&sk->sk_receive_queue, copy_skb); } spin_unlock(&sk->sk_receive_queue.lock); if (po->has_vnet_hdr) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } } skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) getnstimeofday(&ts); status |= ts_status; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_len = skb->len; h.h1->tp_snaplen = snaplen; h.h1->tp_mac = macoff; h.h1->tp_net = netoff; h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; hdrlen = sizeof(*h.h1); break; case TPACKET_V2: h.h2->tp_len = skb->len; h.h2->tp_snaplen = snaplen; h.h2->tp_mac = macoff; h.h2->tp_net = netoff; h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; if (skb_vlan_tag_present(skb)) { h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; } memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); hdrlen = sizeof(*h.h2); break; case TPACKET_V3: /* tp_nxt_offset,vlan are already populated above. 
* So DONT clear those fields here */ h.h3->tp_status |= status; h.h3->tp_len = skb->len; h.h3->tp_snaplen = snaplen; h.h3->tp_mac = macoff; h.h3->tp_net = netoff; h.h3->tp_sec = ts.tv_sec; h.h3->tp_nsec = ts.tv_nsec; memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); hdrlen = sizeof(*h.h3); break; default: BUG(); } sll = h.raw + TPACKET_ALIGN(hdrlen); sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(po->origdev)) sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; smp_mb(); #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 if (po->tp_version <= TPACKET_V2) { u8 *start, *end; end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + macoff + snaplen); for (start = h.raw; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); } smp_wmb(); #endif if (po->tp_version <= TPACKET_V2) { __packet_set_status(po, h.raw, status); sk->sk_data_ready(sk); } else { prb_clear_blk_fill_status(&po->rx_ring); } drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { skb->data = skb_head; skb->len = skb_len; } drop: if (!is_drop_n_account) consume_skb(skb); else kfree_skb(skb); return 0; drop_n_account: is_drop_n_account = true; po->stats.stats1.tp_drops++; spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); kfree_skb(copy_skb); goto drop_n_restore; } static void tpacket_destruct_skb(struct sk_buff *skb) { struct packet_sock *po = pkt_sk(skb->sk); if (likely(po->tx_ring.pg_vec)) { void *ph; __u32 ts; ph = skb_shinfo(skb)->destructor_arg; packet_dec_pending(&po->tx_ring); ts = __packet_set_timestamp(po, ph, skb); __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); } sock_wfree(skb); } static void tpacket_set_protocol(const struct net_device *dev, struct sk_buff *skb) { if (dev->type == ARPHRD_ETHER) { skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; } } static int 
__packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
	/* Validate a user-supplied virtio_net_hdr before transmit.
	 *
	 * If checksum offload is requested, make sure hdr_len covers at
	 * least up to and including the 2-byte checksum field
	 * (csum_start + csum_offset + 2); grow hdr_len if it does not.
	 * Reject a header that claims to be longer than the payload.
	 *
	 * Return: 0 on success, -EINVAL if hdr_len exceeds len.
	 */
	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
	     __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);

	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
		return -EINVAL;

	return 0;
}

/* Pull a virtio_net_hdr off the front of a sendmsg() iov and validate it.
 * On success *len is reduced by the header size.
 *
 * Return: 0 on success, -EINVAL on a short buffer or invalid header,
 * -EFAULT if the user copy fails.
 */
static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr)
{
	if (*len < sizeof(*vnet_hdr))
		return -EINVAL;
	*len -= sizeof(*vnet_hdr);

	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
		return -EFAULT;

	return __packet_snd_vnet_parse(vnet_hdr, *len);
}

/* Build an skb for TX-ring transmission from one ring frame.
 *
 * For SOCK_DGRAM sockets the device builds the link-layer header; for
 * SOCK_RAW, up to @copylen bytes of the frame are copied linearly so the
 * hardware header can be validated, and the remainder of the frame is
 * attached page-by-page as fragments (zero-copy from the ring pages).
 *
 * Return: tp_len on success, negative errno on failure.
 */
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	/* destructor_arg carries the ring frame so tpacket_destruct_skb()
	 * can mark it available again when the skb is freed */
	skb_shinfo(skb)->destructor_arg = ph.raw;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (copylen) {
		/* copy the link-layer header linearly so it can be checked */
		int hdrlen = min_t(int, copylen, tp_len);

		skb_push(skb, dev->hard_header_len);
		skb_put(skb, copylen - dev->hard_header_len);
		err = skb_store_bits(skb, 0, data, hdrlen);
		if (unlikely(err))
			return err;
		if (!dev_validate_header(dev, skb->data, hdrlen))
			return -EINVAL;
		if (!skb->protocol)
			tpacket_set_protocol(dev, skb);

		data +=
hdrlen; to_write -= hdrlen; } offset = offset_in_page(data); len_max = PAGE_SIZE - offset; len = ((to_write > len_max) ? len_max : to_write); skb->data_len = to_write; skb->len += to_write; skb->truesize += to_write; atomic_add(to_write, &po->sk.sk_wmem_alloc); while (likely(to_write)) { nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { pr_err("Packet exceed the number of skb frags(%lu)\n", MAX_SKB_FRAGS); return -EFAULT; } page = pgv_to_page(data); data += len; flush_dcache_page(page); get_page(page); skb_fill_page_desc(skb, nr_frags, page, offset, len); to_write -= len; offset = 0; len_max = PAGE_SIZE; len = ((to_write > len_max) ? len_max : to_write); } skb_probe_transport_header(skb, 0); return tp_len; } static int tpacket_parse_header(struct packet_sock *po, void *frame, int size_max, void **data) { union tpacket_uhdr ph; int tp_len, off; ph.raw = frame; switch (po->tp_version) { case TPACKET_V2: tp_len = ph.h2->tp_len; break; default: tp_len = ph.h1->tp_len; break; } if (unlikely(tp_len > size_max)) { pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); return -EMSGSIZE; } if (unlikely(po->tp_tx_has_off)) { int off_min, off_max; off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); off_max = po->tx_ring.frame_size - tp_len; if (po->sk.sk_type == SOCK_DGRAM) { switch (po->tp_version) { case TPACKET_V2: off = ph.h2->tp_net; break; default: off = ph.h1->tp_net; break; } } else { switch (po->tp_version) { case TPACKET_V2: off = ph.h2->tp_mac; break; default: off = ph.h1->tp_mac; break; } } if (unlikely((off < off_min) || (off_max < off))) return -EINVAL; } else { off = po->tp_hdrlen - sizeof(struct sockaddr_ll); } *data = frame + off; return tp_len; } static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) { struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, 
msg->msg_name); bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); int tp_len, size_max; unsigned char *addr; void *data; int len_sum = 0; int status = TP_STATUS_AVAILABLE; int hlen, tlen, copylen = 0; mutex_lock(&po->pg_vec_lock); if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) goto out; } err = -ENXIO; if (unlikely(dev == NULL)) goto out; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; if (po->sk.sk_socket->type == SOCK_RAW) reserve = dev->hard_header_len; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { if (need_wait && need_resched()) schedule(); continue; } skb = NULL; tp_len = tpacket_parse_header(po, ph, size_max, &data); if (tp_len < 0) goto tpacket_error; status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (po->has_vnet_hdr) { vnet_hdr = data; data += sizeof(*vnet_hdr); tp_len -= sizeof(*vnet_hdr); if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len); } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll) + (copylen - dev->hard_header_len), !need_wait, &err); if (unlikely(skb == NULL)) { /* we assume the socket 
was initially writeable ... */ if (likely(len_sum > 0)) err = len_sum; goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; if (unlikely(tp_len < 0)) { tpacket_error: if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) { tp_len = -EINVAL; goto tpacket_error; } packet_pick_tx_queue(dev, skb); skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || /* Note: packet_read_pending() might be slow if we have * to call it as it's per_cpu variable, but in fast-path * we already short-circuit the loop with the first * condition, and luckily don't have to go that path * anyway. */ (need_wait && packet_read_pending(&po->tx_ring)))); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, 0); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int err, reserve = 0; struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); int hlen, tlen, linear; int extra_len = 0; /* * Get and verify the address. */ if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out_unlock; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); if (err) goto out_unlock; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) goto out_unlock; err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 
linear = max(linear, min_t(int, len, dev->hard_header_len)); skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; if (sock->type == SOCK_RAW && !dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_free; } sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_free; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; packet_pick_tx_queue(dev, skb); if (po->has_vnet_hdr) { err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); if (err) goto out_free; len += sizeof(vnet_hdr); } skb_probe_transport_header(skb, reserve); if (unlikely(extra_len == 4)) skb->no_fcs = 1; err = po->xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. We immediately go * to 'closed' state and remove our protocol entry in the device list. 
*/

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	/* Unhash from the per-netns socket list and drop the in-use count. */
	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	preempt_enable();

	/* Detach from the device under the bind lock and drop our ref. */
	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	/* Tear down any mapped RX/TX rings; a zeroed request frees a ring. */
	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}

	fanout_release(sk);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
*/ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, __be16 proto) { struct packet_sock *po = pkt_sk(sk); struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; struct net_device *dev = NULL; int ret = 0; bool unlisted = false; if (po->fanout) return -EINVAL; lock_sock(sk); spin_lock(&po->bind_lock); rcu_read_lock(); if (name) { dev = dev_get_by_name_rcu(sock_net(sk), name); if (!dev) { ret = -ENODEV; goto out_unlock; } } else if (ifindex) { dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { ret = -ENODEV; goto out_unlock; } } if (dev) dev_hold(dev); proto_curr = po->prot_hook.type; dev_curr = po->prot_hook.dev; need_rehook = proto_curr != proto || dev_curr != dev; if (need_rehook) { if (po->running) { rcu_read_unlock(); __unregister_prot_hook(sk, true); rcu_read_lock(); dev_curr = po->prot_hook.dev; if (dev) unlisted = !dev_get_by_index_rcu(sock_net(sk), dev->ifindex); } po->num = proto; po->prot_hook.type = proto; if (unlikely(unlisted)) { dev_put(dev); po->prot_hook.dev = NULL; po->ifindex = -1; packet_cached_dev_reset(po); } else { po->prot_hook.dev = dev; po->ifindex = dev ? 
dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[15]; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; strlcpy(name, uaddr->sa_data, sizeof(name)); return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol ? : pkt_sk(sk)->num); } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet of type SOCK_PACKET. 
*/

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	/* Packet sockets expose raw traffic; require CAP_NET_RAW. */
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
	if (sk == NULL)
		goto out;

	/* Legacy SOCK_PACKET sockets get their own (obsolete) ops table. */
	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;
	po->xmit = dev_queue_xmit;

	err = packet_alloc_pending(po);
	if (err)
		goto out2;

	packet_cached_dev_reset(po);

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->rollover = NULL;
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	/* A non-zero protocol at create time registers the hook
	 * immediately; otherwise registration waits for bind(). */
	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, &packet_proto, 1);
	preempt_enable();

	return 0;
out2:
	sk_free(sk);
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	int vnet_hdr_len = 0;
	unsigned int origlen = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH?
*/ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = sock_recv_errqueue(sk, msg, len, SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->pressure) packet_rcv_has_room(pkt_sk(sk), NULL); if (pkt_sk(sk)->has_vnet_hdr) { err = packet_rcv_vnet(msg, skb, &len); if (err) goto out_free; vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave. If it worries * a user program they can ask the device for its MTU * anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; if (sock->type != SOCK_PACKET) { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { /* If the address length field is there to be filled * in, we fill it in now. 
*/ if (sock->type == SOCK_PACKET) { __sockaddr_check_size(sizeof(struct sockaddr_pkt)); msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); } if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (skb_vlan_tag_present(skb)) { aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist_delete(struct net_device *dev, struct packet_mclist **mlp) { 
	struct packet_mclist *ml;

	/* Drop every membership entry that refers to @dev (device is
	 * going away); caller holds RTNL via the notifier path. */
	while ((ml = *mlp) != NULL) {
		if (ml->ifindex == dev->ifindex) {
			packet_dev_mc(dev, ml, -1);
			*mlp = ml->next;
			kfree(ml);
		} else
			mlp = &ml->next;
	}
}

/* Add a multicast/promisc/allmulti membership for this socket.
 * Duplicate requests just bump the refcount of the existing entry.
 * Runs under RTNL.  Return: 0 or negative errno.
 */
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	/* zero the tail so later memcmp()/getsockopt can't see stack junk */
	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

/* Drop one reference to a membership; the entry (and the device-level
 * filter) is removed when the count reaches zero.  Always returns 0.
 */
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			break;
		}
	}
	rtnl_unlock();
	return 0;
}

/* Release every membership held by the socket (called on close). */
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; 
if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_reserve = val; return 0; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: spin_lock_bh(&sk->sk_receive_queue.lock); memcpy(&st, &po->stats, sizeof(st)); memset(&po->stats, 0, sizeof(po->stats)); spin_unlock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { lv = sizeof(struct tpacket_stats_v3); st.stats3.tp_packets += st.stats3.tp_drops; data = &st.stats3; } else { lv = sizeof(struct tpacket_stats); st.stats1.tp_packets += st.stats1.tp_drops; data = &st.stats1; } break; case PACKET_AUXDATA: val = po->auxdata; break; case PACKET_ORIGDEV: val = po->origdev; break; case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; case PACKET_VERSION: val = po->tp_version; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; case TPACKET_V3: val = sizeof(struct tpacket3_hdr); break; default: return -EINVAL; } break; case PACKET_RESERVE: val = po->tp_reserve; break; case PACKET_LOSS: val = po->tp_loss; break; case PACKET_TIMESTAMP: val = po->tp_tstamp; break; case PACKET_FANOUT: val = (po->fanout ? 
((u32)po->fanout->id | ((u32)po->fanout->type << 16) | ((u32)po->fanout->flags << 24)) : 0); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; rstats.tp_all = atomic_long_read(&po->rollover->num); rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); data = &rstats; lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; break; case PACKET_QDISC_BYPASS: val = packet_use_direct_xmit(po); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static int compat_packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct packet_sock *po = pkt_sk(sock->sk); if (level != SOL_PACKET) return -ENOPROTOOPT; if (optname == PACKET_FANOUT_DATA && po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { optval = (char __user *)get_compat_bpf_fprog(optval); if (!optval) return -EFAULT; optlen = sizeof(struct sock_fprog); } return packet_setsockopt(sock, level, optname, optval, optlen); } #endif static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { struct sock *sk; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __unregister_prot_hook(sk, false); sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); fanout_release(sk); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } 
spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num) register_prot_hook(sk); spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? 
Well, I still did not learn better way to account * for user mmaps. */ static void packet_mm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_inc(&pkt_sk(sk)->mapped); } static void packet_mm_close(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct socket *sock = file->private_data; struct sock *sk = sock->sk; if (sk) atomic_dec(&pkt_sk(sk)->mapped); } static const struct vm_operations_struct packet_mmap_ops = { .open = packet_mm_open, .close = packet_mm_close, }; static void free_pg_vec(struct pgv *pg_vec, unsigned int order, unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { if (is_vmalloc_addr(pg_vec[i].buffer)) vfree(pg_vec[i].buffer); else free_pages((unsigned long)pg_vec[i].buffer, order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer; gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* __get_free_pages failed, fall back to vmalloc */ buffer = vzalloc((1 << order) * PAGE_SIZE); if (buffer) return buffer; /* vmalloc failed, lets dig into swap here */ gfp_flags &= ~__GFP_NORETRY; buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; /* complete and utter failure */ return NULL; } static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; struct pgv *pg_vec; int i; pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); if (unlikely(!pg_vec)) goto out; for (i = 0; i < block_nr; i++) { pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } out: return pg_vec; out_free_pgvec: free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } static int packet_set_ring(struct sock *sk, union 
tpacket_req_u *req_u, int closing, int tx_ring) { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req;

	/* Serialized against bind/close via the socket lock. */
	lock_sock(sk); /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { net_warn_ratelimited("Tx-ring is not supported.\n"); goto out; }

	rb = tx_ring ? &po->tx_ring : &po->rx_ring; rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	/* Refuse to (re)configure while the ring is mmapped or has
	 * frames still owned by user space. */
	err = -EBUSY; if (!closing) { if (atomic_read(&po->mapped)) goto out; if (packet_read_pending(rb)) goto out; }

	if (req->tp_block_nr) { /* Sanity tests and some calculations */ err = -EBUSY; if (unlikely(rb->pg_vec)) goto out;

	switch (po->tp_version) { case TPACKET_V1: po->tp_hdrlen = TPACKET_HDRLEN; break; case TPACKET_V2: po->tp_hdrlen = TPACKET2_HDRLEN; break; case TPACKET_V3: po->tp_hdrlen = TPACKET3_HDRLEN; break; }

	/* Geometry validation; the BLK_PLUS_PRIV check guards against the
	 * tp_sizeof_priv overflow on TPACKET_V3 block sizing. */
	err = -EINVAL; if (unlikely((int)req->tp_block_size <= 0)) goto out; if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && (int)(req->tp_block_size - BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) goto out; if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) goto out;

	rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out;

	err = -ENOMEM; order = get_order(req->tp_block_size); pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { case TPACKET_V3: /* Transmit path is not supported. We checked * it above but just being paranoid */ if (!tx_ring) init_prb_bdqc(po, rb, pg_vec, req_u); break; default: break; } } /* Done */ else { err = -EINVAL; if (unlikely(req->tp_frame_nr)) goto out; }

	/* Detach socket from network */ spin_lock(&po->bind_lock); was_running = po->running; num = po->num; if (was_running) { po->num = 0; __unregister_prot_hook(sk, false); } spin_unlock(&po->bind_lock);

	/* Wait out any receive path still looking at the old ring. */
	synchronize_net();

	err = -EBUSY; mutex_lock(&po->pg_vec_lock); if (closing || atomic_read(&po->mapped) == 0) { err = 0; spin_lock_bh(&rb_queue->lock); swap(rb->pg_vec, pg_vec); rb->frame_max = (req->tp_frame_nr - 1); rb->head = 0; rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock);

	swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr);

	rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; po->prot_hook.func = (po->rx_ring.pg_vec) ? tpacket_rcv : packet_rcv; skb_queue_purge(rb_queue); if (atomic_read(&po->mapped)) pr_err("packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); } mutex_unlock(&po->pg_vec_lock);

	/* Re-attach the prot hook if it was running before. */
	spin_lock(&po->bind_lock); if (was_running) { po->num = num; register_prot_hook(sk); } spin_unlock(&po->bind_lock); if (closing && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); }

	/* pg_vec now holds either the failed new ring or the old one. */
	if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: release_sock(sk); return err; }

/* mmap both rings (rx first, then tx) contiguously into the caller's VMA.
 * The requested size must exactly match the configured ring sizes. */
static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned long size, expected_size; struct packet_ring_buffer *rb; unsigned long start; int err = -EINVAL; int i; if (vma->vm_pgoff) return -EINVAL; mutex_lock(&po->pg_vec_lock); expected_size = 0; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec) { expected_size += rb->pg_vec_len * rb->pg_vec_pages * PAGE_SIZE; } } if (expected_size == 0) goto out; size = vma->vm_end - vma->vm_start; if
(size != expected_size) goto out; start = vma->vm_start; for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { if (rb->pg_vec == NULL) continue; for (i = 0; i < rb->pg_vec_len; i++) { struct page *page; void *kaddr = rb->pg_vec[i].buffer; int pg_num; for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) { page = pgv_to_page(kaddr); err = vm_insert_page(vma, start, page); if (unlikely(err)) goto out; start += PAGE_SIZE; kaddr += PAGE_SIZE; } } } atomic_inc(&po->mapped); vma->vm_ops = &packet_mmap_ops; err = 0; out: mutex_unlock(&po->pg_vec_lock); return err; } static const struct proto_ops packet_ops_spkt = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind_spkt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { .family = PF_PACKET, .owner = THIS_MODULE, .release = packet_release, .bind = packet_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_packet_setsockopt, #endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family packet_family_ops = { .family = PF_PACKET, .create = packet_create, .owner = THIS_MODULE, }; static struct notifier_block packet_netdev_notifier = { .notifier_call = packet_notifier, }; #ifdef CONFIG_PROC_FS static void *packet_seq_start(struct 
seq_file *seq, loff_t *pos) __acquires(RCU) { struct net *net = seq_file_net(seq); rcu_read_lock(); return seq_hlist_start_head_rcu(&net->packet.sklist, *pos); } static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); return seq_hlist_next_rcu(v, &net->packet.sklist, pos); } static void packet_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int packet_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); else { struct sock *s = sk_entry(v); const struct packet_sock *po = pkt_sk(s); seq_printf(seq, "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), s->sk_type, ntohs(po->num), po->ifindex, po->running, atomic_read(&s->sk_rmem_alloc), from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), sock_i_ino(s)); } return 0; } static const struct seq_operations packet_seq_ops = { .start = packet_seq_start, .next = packet_seq_next, .stop = packet_seq_stop, .show = packet_seq_show, }; static int packet_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &packet_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations packet_seq_fops = { .owner = THIS_MODULE, .open = packet_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif static int __net_init packet_net_init(struct net *net) { mutex_init(&net->packet.sklist_lock); INIT_HLIST_HEAD(&net->packet.sklist); if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops)) return -ENOMEM; return 0; } static void __net_exit packet_net_exit(struct net *net) { remove_proc_entry("packet", net->proc_net); } static struct pernet_operations packet_net_ops = { .init = packet_net_init, .exit = packet_net_exit, }; static void __exit packet_exit(void) { unregister_netdevice_notifier(&packet_netdev_notifier); unregister_pernet_subsys(&packet_net_ops); 
sock_unregister(PF_PACKET); proto_unregister(&packet_proto); } static int __init packet_init(void) { int rc = proto_register(&packet_proto, 0); if (rc != 0) goto out; sock_register(&packet_family_ops); register_pernet_subsys(&packet_net_ops); register_netdevice_notifier(&packet_netdev_notifier); out: return rc; } module_init(packet_init); module_exit(packet_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PACKET);
./CrossVul/dataset_final_sorted/CWE-416/c/good_3175_0
crossvul-cpp_data_good_3245_0
/* fe-netjoin.c : irssi Copyright (C) 2000 Timo Sirainen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */

/* Collapses the flood of JOINs after an IRC netsplit heals into compact
 * "Netsplit over" messages per channel. */

#include "module.h"
#include "module-formats.h"
#include "signals.h"
#include "levels.h"
#include "misc.h"
#include "settings.h"

#include "irc-servers.h"
#include "modes.h"
#include "ignore.h"
#include "netsplit.h"

#include "printtext.h"

#define NETJOIN_WAIT_TIME 5 /* how many seconds to wait for the netsplitted JOIN messages to stop */
#define NETJOIN_MAX_WAIT 30 /* how many seconds to wait for nick to join to the rest of the channels she was before the netsplit */

/* One rejoining nick: channels it was on before the split vs. channels it
 * has rejoined so far (now_channels entries carry a one-char prefix slot). */
typedef struct { char *nick; GSList *old_channels; GSList *now_channels; } NETJOIN_REC;

/* Per-server collection of pending netjoins. */
typedef struct { IRC_SERVER_REC *server; time_t last_netjoin; GSList *netjoins; } NETJOIN_SERVER_REC;

/* Scratch record used while building the per-channel output line. */
typedef struct { int count; GString *nicks; } TEMP_PRINT_REC;

static int join_tag; static int netjoin_max_nicks, hide_netsplit_quits; static int printing_joins; static GSList *joinservers;

/* Find the NETJOIN_SERVER_REC tracking this server, or NULL. */
static NETJOIN_SERVER_REC *netjoin_find_server(IRC_SERVER_REC *server) { GSList *tmp; g_return_val_if_fail(server != NULL, NULL); for (tmp = joinservers; tmp != NULL; tmp = tmp->next) { NETJOIN_SERVER_REC *rec = tmp->data; if (rec->server == server) return rec; } return NULL; }

/* Start tracking a rejoining nick; creates the per-server record on demand
 * and stamps last_netjoin so the flush timer keeps waiting. */
static NETJOIN_REC *netjoin_add(IRC_SERVER_REC *server, const char *nick, GSList *channels) { NETJOIN_REC *rec; NETJOIN_SERVER_REC *srec; g_return_val_if_fail(server != NULL, NULL); g_return_val_if_fail(nick != NULL, NULL); rec = g_new0(NETJOIN_REC, 1); rec->nick = g_strdup(nick); while (channels != NULL) { NETSPLIT_CHAN_REC *channel = channels->data; rec->old_channels = g_slist_append(rec->old_channels, g_strdup(channel->name)); channels = channels->next; } srec = netjoin_find_server(server); if (srec == NULL) { srec = g_new0(NETJOIN_SERVER_REC, 1); srec->server = server; joinservers = g_slist_append(joinservers, srec); } srec->last_netjoin = time(NULL); srec->netjoins = g_slist_append(srec->netjoins, rec); return rec; }

/* Look up a pending netjoin record for a nick (ASCII case-insensitive). */
static NETJOIN_REC *netjoin_find(IRC_SERVER_REC *server, const char *nick) { NETJOIN_SERVER_REC *srec; GSList *tmp; g_return_val_if_fail(server != NULL, NULL); g_return_val_if_fail(nick != NULL, NULL); srec = netjoin_find_server(server); if (srec == NULL) return NULL; for (tmp = srec->netjoins; tmp != NULL; tmp = tmp->next) { NETJOIN_REC *rec = tmp->data; if (g_ascii_strcasecmp(rec->nick, nick) == 0) return rec; } return NULL; }

/* Free one NETJOIN_REC and unlink it from its server record. */
static void netjoin_remove(NETJOIN_SERVER_REC *server, NETJOIN_REC *rec) { server->netjoins = g_slist_remove(server->netjoins, rec); g_slist_foreach(rec->old_channels, (GFunc) g_free, NULL); g_slist_foreach(rec->now_channels, (GFunc) g_free, NULL); g_slist_free(rec->old_channels); g_slist_free(rec->now_channels); g_free(rec->nick); g_free(rec); }

/* Drop a whole per-server record including all its pending netjoins. */
static void netjoin_server_remove(NETJOIN_SERVER_REC *server) { joinservers = g_slist_remove(joinservers, server); while (server->netjoins != NULL) netjoin_remove(server, server->netjoins->data); g_free(server); }

/* GHFunc: emit the collected nick list for one channel and free the
 * temporary record (both the key string and the TEMP_PRINT_REC). */
static void print_channel_netjoins(char *channel, TEMP_PRINT_REC *rec, NETJOIN_SERVER_REC *server) { if (rec->nicks->len > 0) g_string_truncate(rec->nicks, rec->nicks->len-2); printformat(server->server, channel, MSGLEVEL_JOINS, rec->count > netjoin_max_nicks ? IRCTXT_NETSPLIT_JOIN_MORE : IRCTXT_NETSPLIT_JOIN, rec->nicks->str, rec->count-netjoin_max_nicks); g_string_free(rec->nicks, TRUE); g_free(rec); g_free(channel); }

/* Flush pending netjoins, optionally restricted to one channel.
 * NOTE(review): uses locale-dependent strcasecmp() for the channel filter
 * while nick matching above uses g_ascii_strcasecmp(); upstream irssi later
 * unified these — confirm before relying on non-ASCII channel names. */
static void print_netjoins(NETJOIN_SERVER_REC *server, const char *filter_channel) { TEMP_PRINT_REC *temp; GHashTable *channels; GSList *tmp, *tmp2, *next, *next2, *old; g_return_if_fail(server != NULL); printing_joins = TRUE; /* save nicks to string, clear now_channels and remove the same channels from old_channels list */ channels = g_hash_table_new((GHashFunc) g_istr_hash, (GCompareFunc) g_istr_equal); for (tmp = server->netjoins; tmp != NULL; tmp = next) { NETJOIN_REC *rec = tmp->data; next = g_slist_next(tmp); for (tmp2 = rec->now_channels; tmp2 != NULL; tmp2 = next2) { char *channel = tmp2->data; char *realchannel = channel + 1; next2 = g_slist_next(tmp2); /* Filter the results by channel if asked to do so */ if (filter_channel != NULL && strcasecmp(realchannel, filter_channel) != 0) continue; temp = g_hash_table_lookup(channels, realchannel); if (temp == NULL) { temp = g_new0(TEMP_PRINT_REC, 1); temp->nicks = g_string_new(NULL); g_hash_table_insert(channels, g_strdup(realchannel), temp); } temp->count++; if (temp->count <= netjoin_max_nicks) { if (*channel != ' ') g_string_append_c(temp->nicks, *channel); g_string_append_printf(temp->nicks, "%s, ", rec->nick); } /* remove the channel from old_channels too */ old = gslist_find_icase_string(rec->old_channels, realchannel); if (old != NULL) { void *data = old->data; rec->old_channels = g_slist_remove(rec->old_channels, data); g_free(data); } /* drop tmp2 from the list */ rec->now_channels = g_slist_delete_link(rec->now_channels, tmp2); g_free(channel); } if (rec->old_channels == NULL) netjoin_remove(server, rec); } g_hash_table_foreach(channels, (GHFunc) print_channel_netjoins, server); g_hash_table_destroy(channels); if (server->netjoins == NULL) netjoin_server_remove(server); printing_joins = FALSE; }

/* something is going to be printed to screen, print our current netsplit message before it. */
static void sig_print_starting(TEXT_DEST_REC *dest) { NETJOIN_SERVER_REC *rec; if (printing_joins) return; if (!IS_IRC_SERVER(dest->server)) return; if (!(dest->level & MSGLEVEL_PUBLIC)) return; if (!server_ischannel(dest->server, dest->target)) return; rec = netjoin_find_server(IRC_SERVER(dest->server)); if (rec != NULL && rec->netjoins != NULL) print_netjoins(rec, dest->target); }

/* 1-second timeout: flush quiet netjoins, expire stale ones, and tear the
 * timer down when nothing is pending anymore. */
static int sig_check_netjoins(void) { GSList *tmp, *next; int diff; time_t now; now = time(NULL); /* first print all netjoins which haven't had any new joins * for NETJOIN_WAIT_TIME; this may cause them to be removed * (all users who rejoined, rejoined all channels) */ for (tmp = joinservers; tmp != NULL; tmp = next) { NETJOIN_SERVER_REC *server = tmp->data; next = tmp->next; diff = now-server->last_netjoin; if (diff <= NETJOIN_WAIT_TIME) { /* wait for more JOINs */ continue; } if (server->netjoins != NULL) print_netjoins(server, NULL); } /* now remove all netjoins which haven't had any new joins * for NETJOIN_MAX_WAIT (user rejoined some but not all channels * after split) */ for (tmp = joinservers; tmp != NULL; tmp = next) { NETJOIN_SERVER_REC *server = tmp->data; next = tmp->next; diff = now-server->last_netjoin; if (diff >= NETJOIN_MAX_WAIT) { /* waited long enough, forget about the rest */ netjoin_server_remove(server); } } if (joinservers == NULL) { g_source_remove(join_tag); signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting); join_tag = -1; } return 1; }

/* Suppress QUIT messages whose reason looks like a netsplit. */
static void msg_quit(IRC_SERVER_REC *server, const char *nick, const char *address, const char *reason) { if (IS_IRC_SERVER(server) && quitmsg_is_split(reason)) signal_stop(); }

/* Swallow JOINs of nicks known to have been netsplit; they are batched
 * into netjoin records and printed later by print_netjoins(). */
static void msg_join(IRC_SERVER_REC *server, const char *channel, const char *nick, const char *address) { NETSPLIT_REC *split; NETJOIN_REC *netjoin; GSList *channels; int rejoin = 1; if (!IS_IRC_SERVER(server)) return; if (ignore_check(SERVER(server), nick, address, channel, NULL, MSGLEVEL_JOINS)) return; split = netsplit_find(server, nick, address); netjoin = netjoin_find(server, nick); if (split == NULL && netjoin == NULL) return; /* if this was not a channel they split from, treat it normally */ if (netjoin != NULL) { if (!gslist_find_icase_string(netjoin->old_channels, channel)) return; } else { channels = split->channels; while (channels != NULL) { NETSPLIT_CHAN_REC *schannel = channels->data; if (!strcasecmp(schannel->name, channel)) break; channels = channels->next; } /* we still need to create a NETJOIN_REC now as the * NETSPLIT_REC will be destroyed */ if (channels == NULL) rejoin = 0; } if (join_tag == -1) { join_tag = g_timeout_add(1000, (GSourceFunc) sig_check_netjoins, NULL); signal_add("print starting", (SIGNAL_FUNC) sig_print_starting); } if (netjoin == NULL) netjoin = netjoin_add(server, nick, split->channels); if (rejoin) { netjoin->now_channels = g_slist_append(netjoin->now_channels, g_strconcat(" ", channel, NULL)); signal_stop(); } }

/* Record a mode prefix (@/+/...) in the now_channels entry's first byte so
 * the netjoin line can show it; returns FALSE if the channel isn't pending. */
static int netjoin_set_nickmode(IRC_SERVER_REC *server, NETJOIN_REC *rec, const char *channel, char prefix) { GSList *pos; const char *flags; char *found_chan = NULL; for (pos = rec->now_channels; pos != NULL; pos = pos->next) { char *chan = pos->data; if (strcasecmp(chan+1, channel) == 0) { found_chan = chan; break; } } if (found_chan == NULL) return FALSE; flags = server->get_nick_flags(SERVER(server)); while (*flags != '\0') { if (found_chan[0] == *flags) break; if (prefix == *flags) { found_chan[0] = prefix; break; } flags++; } return TRUE; }

/* Hide server-originated mode changes that only re-op rejoining nicks;
 * anything else is shown normally. */
static void msg_mode(IRC_SERVER_REC *server, const char *channel, const char *sender, const char *addr, const char *data) { NETJOIN_REC *rec; char *params, *mode, *nicks; char **nicklist, **nick, type, prefix; int show; g_return_if_fail(data != NULL); if (!server_ischannel(SERVER(server), channel) || addr != NULL) return; params = event_get_params(data, 2 | PARAM_FLAG_GETREST, &mode, &nicks); /* parse server mode changes - hide operator status changes and show them in the netjoin message instead as @ before the nick */ nick = nicklist = g_strsplit(nicks, " ", -1); type = '+'; show = FALSE; for (; *mode != '\0'; mode++) { if (*mode == '+' || *mode == '-') { type = *mode; continue; } if (*nick != NULL && GET_MODE_PREFIX(server, *mode)) { /* give/remove ops */ rec = netjoin_find(server, *nick); prefix = GET_MODE_PREFIX(server, *mode); if (rec == NULL || type != '+' || prefix == '\0' || !netjoin_set_nickmode(server, rec, channel, prefix)) show = TRUE; nick++; } else { if (HAS_MODE_ARG(server, type, *mode) && *nick != NULL) nick++; show = TRUE; } } if (!show) signal_stop(); g_strfreev(nicklist); g_free(params); }

/* React to setting changes: (un)hook the message handlers when
 * hide_netsplit_quits is toggled. */
static void read_settings(void) { int old_hide; old_hide = hide_netsplit_quits; hide_netsplit_quits = settings_get_bool("hide_netsplit_quits"); netjoin_max_nicks = settings_get_int("netjoin_max_nicks"); if (old_hide && !hide_netsplit_quits) { signal_remove("message quit", (SIGNAL_FUNC) msg_quit); signal_remove("message join", (SIGNAL_FUNC) msg_join); signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode); } else if (!old_hide && hide_netsplit_quits) { signal_add("message quit", (SIGNAL_FUNC) msg_quit); signal_add("message join", (SIGNAL_FUNC) msg_join); signal_add("message irc mode", (SIGNAL_FUNC) msg_mode); } }

/* Drop all pending state for a disconnecting server so later flushes never
 * dereference a freed IRC_SERVER_REC. */
static void sig_server_disconnected(IRC_SERVER_REC *server) { NETJOIN_SERVER_REC *netjoin_server; g_return_if_fail(server != NULL); if (!IS_IRC_SERVER(server)) return; if ((netjoin_server = netjoin_find_server(server))) { netjoin_server_remove(netjoin_server); } }

void fe_netjoin_init(void) { settings_add_bool("misc", "hide_netsplit_quits", TRUE); settings_add_int("misc", "netjoin_max_nicks", 10); join_tag = -1; printing_joins = FALSE; read_settings(); signal_add("setup changed", (SIGNAL_FUNC) read_settings); signal_add("server disconnected", (SIGNAL_FUNC) sig_server_disconnected); }

void fe_netjoin_deinit(void) { while (joinservers != NULL) netjoin_server_remove(joinservers->data); if (join_tag != -1) { g_source_remove(join_tag); signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting); } signal_remove("setup changed", (SIGNAL_FUNC) read_settings); signal_remove("server disconnected", (SIGNAL_FUNC) sig_server_disconnected); signal_remove("message quit", (SIGNAL_FUNC) msg_quit); signal_remove("message join", (SIGNAL_FUNC) msg_join); signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode); }
./CrossVul/dataset_final_sorted/CWE-416/c/good_3245_0
crossvul-cpp_data_good_2550_0
/* * fs/timerfd.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * * * Thanks to Thomas Gleixner for code reviews and useful comments. * */ #include <linux/alarmtimer.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/anon_inodes.h> #include <linux/timerfd.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/rcupdate.h> struct timerfd_ctx { union { struct hrtimer tmr; struct alarm alarm; } t; ktime_t tintv; ktime_t moffs; wait_queue_head_t wqh; u64 ticks; int clockid; short unsigned expired; short unsigned settime_flags; /* to show in fdinfo */ struct rcu_head rcu; struct list_head clist; spinlock_t cancel_lock; bool might_cancel; }; static LIST_HEAD(cancel_list); static DEFINE_SPINLOCK(cancel_lock); static inline bool isalarm(struct timerfd_ctx *ctx) { return ctx->clockid == CLOCK_REALTIME_ALARM || ctx->clockid == CLOCK_BOOTTIME_ALARM; } /* * This gets called when the timer event triggers. We set the "expired" * flag, but we do not re-arm the timer (in case it's necessary, * tintv != 0) until the timer is accessed. 
*/ static void timerfd_triggered(struct timerfd_ctx *ctx) { unsigned long flags; spin_lock_irqsave(&ctx->wqh.lock, flags); ctx->expired = 1; ctx->ticks++; wake_up_locked(&ctx->wqh); spin_unlock_irqrestore(&ctx->wqh.lock, flags); } static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) { struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, t.tmr); timerfd_triggered(ctx); return HRTIMER_NORESTART; } static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm, ktime_t now) { struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx, t.alarm); timerfd_triggered(ctx); return ALARMTIMER_NORESTART; } /* * Called when the clock was set to cancel the timers in the cancel * list. This will wake up processes waiting on these timers. The * wake-up requires ctx->ticks to be non zero, therefore we increment * it before calling wake_up_locked(). */ void timerfd_clock_was_set(void) { ktime_t moffs = ktime_mono_to_real(0); struct timerfd_ctx *ctx; unsigned long flags; rcu_read_lock(); list_for_each_entry_rcu(ctx, &cancel_list, clist) { if (!ctx->might_cancel) continue; spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->moffs != moffs) { ctx->moffs = KTIME_MAX; ctx->ticks++; wake_up_locked(&ctx->wqh); } spin_unlock_irqrestore(&ctx->wqh.lock, flags); } rcu_read_unlock(); } static void __timerfd_remove_cancel(struct timerfd_ctx *ctx) { if (ctx->might_cancel) { ctx->might_cancel = false; spin_lock(&cancel_lock); list_del_rcu(&ctx->clist); spin_unlock(&cancel_lock); } } static void timerfd_remove_cancel(struct timerfd_ctx *ctx) { spin_lock(&ctx->cancel_lock); __timerfd_remove_cancel(ctx); spin_unlock(&ctx->cancel_lock); } static bool timerfd_canceled(struct timerfd_ctx *ctx) { if (!ctx->might_cancel || ctx->moffs != KTIME_MAX) return false; ctx->moffs = ktime_mono_to_real(0); return true; } static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) { spin_lock(&ctx->cancel_lock); if ((ctx->clockid == CLOCK_REALTIME || ctx->clockid == 
CLOCK_REALTIME_ALARM) && (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { if (!ctx->might_cancel) { ctx->might_cancel = true; spin_lock(&cancel_lock); list_add_rcu(&ctx->clist, &cancel_list); spin_unlock(&cancel_lock); } } else { __timerfd_remove_cancel(ctx); } spin_unlock(&ctx->cancel_lock); } static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) { ktime_t remaining; if (isalarm(ctx)) remaining = alarm_expires_remaining(&ctx->t.alarm); else remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr); return remaining < 0 ? 0: remaining; } static int timerfd_setup(struct timerfd_ctx *ctx, int flags, const struct itimerspec *ktmr) { enum hrtimer_mode htmode; ktime_t texp; int clockid = ctx->clockid; htmode = (flags & TFD_TIMER_ABSTIME) ? HRTIMER_MODE_ABS: HRTIMER_MODE_REL; texp = timespec_to_ktime(ktmr->it_value); ctx->expired = 0; ctx->ticks = 0; ctx->tintv = timespec_to_ktime(ktmr->it_interval); if (isalarm(ctx)) { alarm_init(&ctx->t.alarm, ctx->clockid == CLOCK_REALTIME_ALARM ? 
ALARM_REALTIME : ALARM_BOOTTIME, timerfd_alarmproc); } else { hrtimer_init(&ctx->t.tmr, clockid, htmode); hrtimer_set_expires(&ctx->t.tmr, texp); ctx->t.tmr.function = timerfd_tmrproc; } if (texp != 0) { if (isalarm(ctx)) { if (flags & TFD_TIMER_ABSTIME) alarm_start(&ctx->t.alarm, texp); else alarm_start_relative(&ctx->t.alarm, texp); } else { hrtimer_start(&ctx->t.tmr, texp, htmode); } if (timerfd_canceled(ctx)) return -ECANCELED; } ctx->settime_flags = flags & TFD_SETTIME_FLAGS; return 0; } static int timerfd_release(struct inode *inode, struct file *file) { struct timerfd_ctx *ctx = file->private_data; timerfd_remove_cancel(ctx); if (isalarm(ctx)) alarm_cancel(&ctx->t.alarm); else hrtimer_cancel(&ctx->t.tmr); kfree_rcu(ctx, rcu); return 0; } static unsigned int timerfd_poll(struct file *file, poll_table *wait) { struct timerfd_ctx *ctx = file->private_data; unsigned int events = 0; unsigned long flags; poll_wait(file, &ctx->wqh, wait); spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->ticks) events |= POLLIN; spin_unlock_irqrestore(&ctx->wqh.lock, flags); return events; } static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct timerfd_ctx *ctx = file->private_data; ssize_t res; u64 ticks = 0; if (count < sizeof(ticks)) return -EINVAL; spin_lock_irq(&ctx->wqh.lock); if (file->f_flags & O_NONBLOCK) res = -EAGAIN; else res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks); /* * If clock has changed, we do not care about the * ticks and we do not rearm the timer. Userspace must * reevaluate anyway. */ if (timerfd_canceled(ctx)) { ctx->ticks = 0; ctx->expired = 0; res = -ECANCELED; } if (ctx->ticks) { ticks = ctx->ticks; if (ctx->expired && ctx->tintv) { /* * If tintv != 0, this is a periodic timer that * needs to be re-armed. We avoid doing it in the timer * callback to avoid DoS attacks specifying a very * short timer period. 
*/ if (isalarm(ctx)) { ticks += alarm_forward_now( &ctx->t.alarm, ctx->tintv) - 1; alarm_restart(&ctx->t.alarm); } else { ticks += hrtimer_forward_now(&ctx->t.tmr, ctx->tintv) - 1; hrtimer_restart(&ctx->t.tmr); } } ctx->expired = 0; ctx->ticks = 0; } spin_unlock_irq(&ctx->wqh.lock); if (ticks) res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks); return res; } #ifdef CONFIG_PROC_FS static void timerfd_show(struct seq_file *m, struct file *file) { struct timerfd_ctx *ctx = file->private_data; struct itimerspec t; spin_lock_irq(&ctx->wqh.lock); t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); t.it_interval = ktime_to_timespec(ctx->tintv); spin_unlock_irq(&ctx->wqh.lock); seq_printf(m, "clockid: %d\n" "ticks: %llu\n" "settime flags: 0%o\n" "it_value: (%llu, %llu)\n" "it_interval: (%llu, %llu)\n", ctx->clockid, (unsigned long long)ctx->ticks, ctx->settime_flags, (unsigned long long)t.it_value.tv_sec, (unsigned long long)t.it_value.tv_nsec, (unsigned long long)t.it_interval.tv_sec, (unsigned long long)t.it_interval.tv_nsec); } #else #define timerfd_show NULL #endif #ifdef CONFIG_CHECKPOINT_RESTORE static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct timerfd_ctx *ctx = file->private_data; int ret = 0; switch (cmd) { case TFD_IOC_SET_TICKS: { u64 ticks; if (copy_from_user(&ticks, (u64 __user *)arg, sizeof(ticks))) return -EFAULT; if (!ticks) return -EINVAL; spin_lock_irq(&ctx->wqh.lock); if (!timerfd_canceled(ctx)) { ctx->ticks = ticks; wake_up_locked(&ctx->wqh); } else ret = -ECANCELED; spin_unlock_irq(&ctx->wqh.lock); break; } default: ret = -ENOTTY; break; } return ret; } #else #define timerfd_ioctl NULL #endif static const struct file_operations timerfd_fops = { .release = timerfd_release, .poll = timerfd_poll, .read = timerfd_read, .llseek = noop_llseek, .show_fdinfo = timerfd_show, .unlocked_ioctl = timerfd_ioctl, }; static int timerfd_fget(int fd, struct fd *p) { struct fd f = fdget(fd); if (!f.file) 
return -EBADF; if (f.file->f_op != &timerfd_fops) { fdput(f); return -EINVAL; } *p = f; return 0; } SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) { int ufd; struct timerfd_ctx *ctx; /* Check the TFD_* constants for consistency. */ BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK); if ((flags & ~TFD_CREATE_FLAGS) || (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME && clockid != CLOCK_REALTIME_ALARM && clockid != CLOCK_BOOTTIME && clockid != CLOCK_BOOTTIME_ALARM)) return -EINVAL; if (!capable(CAP_WAKE_ALARM) && (clockid == CLOCK_REALTIME_ALARM || clockid == CLOCK_BOOTTIME_ALARM)) return -EPERM; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; init_waitqueue_head(&ctx->wqh); spin_lock_init(&ctx->cancel_lock); ctx->clockid = clockid; if (isalarm(ctx)) alarm_init(&ctx->t.alarm, ctx->clockid == CLOCK_REALTIME_ALARM ? ALARM_REALTIME : ALARM_BOOTTIME, timerfd_alarmproc); else hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); ctx->moffs = ktime_mono_to_real(0); ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); if (ufd < 0) kfree(ctx); return ufd; } static int do_timerfd_settime(int ufd, int flags, const struct itimerspec *new, struct itimerspec *old) { struct fd f; struct timerfd_ctx *ctx; int ret; if ((flags & ~TFD_SETTIME_FLAGS) || !timespec_valid(&new->it_value) || !timespec_valid(&new->it_interval)) return -EINVAL; ret = timerfd_fget(ufd, &f); if (ret) return ret; ctx = f.file->private_data; if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { fdput(f); return -EPERM; } timerfd_setup_cancel(ctx, flags); /* * We need to stop the existing timer before reprogramming * it to the new values. 
*/
	for (;;) {
		spin_lock_irq(&ctx->wqh.lock);
		/* try_to_cancel returns -1 while the timer callback is
		 * running; drop the lock and spin until cancel succeeds. */
		if (isalarm(ctx)) {
			if (alarm_try_to_cancel(&ctx->t.alarm) >= 0)
				break;
		} else {
			if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
				break;
		}
		spin_unlock_irq(&ctx->wqh.lock);
		cpu_relax();
	}

	/*
	 * If the timer is expired and it's periodic, we need to advance it
	 * because the caller may want to know the previous expiration time.
	 * We do not update "ticks" and "expired" since the timer will be
	 * re-programmed again in the following timerfd_setup() call.
	 */
	if (ctx->expired && ctx->tintv) {
		if (isalarm(ctx))
			alarm_forward_now(&ctx->t.alarm, ctx->tintv);
		else
			hrtimer_forward_now(&ctx->t.tmr, ctx->tintv);
	}

	old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	old->it_interval = ktime_to_timespec(ctx->tintv);

	/*
	 * Re-program the timer to the new value ...
	 */
	ret = timerfd_setup(ctx, flags, new);

	spin_unlock_irq(&ctx->wqh.lock);
	fdput(f);
	return ret;
}

/* Common gettime path: report remaining time and interval.  A periodic
 * timer that already expired is first advanced and restarted so the
 * remaining time is relative to now (the -1 keeps the consumed
 * expiration out of the tick count handed to readers). */
static int do_timerfd_gettime(int ufd, struct itimerspec *t)
{
	struct fd f;
	struct timerfd_ctx *ctx;
	int ret = timerfd_fget(ufd, &f);
	if (ret)
		return ret;
	ctx = f.file->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	if (ctx->expired && ctx->tintv) {
		ctx->expired = 0;

		if (isalarm(ctx)) {
			ctx->ticks +=
				alarm_forward_now(
					&ctx->t.alarm, ctx->tintv) - 1;
			alarm_restart(&ctx->t.alarm);
		} else {
			ctx->ticks +=
				hrtimer_forward_now(&ctx->t.tmr, ctx->tintv)
				- 1;
			hrtimer_restart(&ctx->t.tmr);
		}
	}
	t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	t->it_interval = ktime_to_timespec(ctx->tintv);
	spin_unlock_irq(&ctx->wqh.lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
		const struct itimerspec __user *, utmr,
		struct itimerspec __user *, otmr)
{
	struct itimerspec new, old;
	int ret;

	if (copy_from_user(&new, utmr, sizeof(new)))
		return -EFAULT;
	ret = do_timerfd_settime(ufd, flags, &new, &old);
	if (ret)
		return ret;
	/* otmr is optional; a copy failure after a successful settime
	 * still reports -EFAULT (the timer is already reprogrammed). */
	if (otmr && copy_to_user(otmr, &old, sizeof(old)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec
__user *, otmr)
{
	struct itimerspec kotmr;
	int ret = do_timerfd_gettime(ufd, &kotmr);
	if (ret)
		return ret;
	return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
}

#ifdef CONFIG_COMPAT
/* 32-bit compat entry points: identical to the native syscalls except
 * that the itimerspec crosses the user boundary in compat layout. */
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
		const struct compat_itimerspec __user *, utmr,
		struct compat_itimerspec __user *, otmr)
{
	struct itimerspec new, old;
	int ret;

	if (get_compat_itimerspec(&new, utmr))
		return -EFAULT;
	ret = do_timerfd_settime(ufd, flags, &new, &old);
	if (ret)
		return ret;
	if (otmr && put_compat_itimerspec(otmr, &old))
		return -EFAULT;
	return ret;
}

COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
		struct compat_itimerspec __user *, otmr)
{
	struct itimerspec kotmr;
	int ret = do_timerfd_gettime(ufd, &kotmr);
	if (ret)
		return ret;
	return put_compat_itimerspec(otmr, &kotmr) ? -EFAULT: 0;
}
#endif
/*
 * NOTE(review): two stray extraction-artifact lines (a dataset file path
 * and an identifier) appeared here between two unrelated kernel sources;
 * they are not code and have been fenced into this comment.  Everything
 * below this point belongs to a different file (the IPX socket layer,
 * net/ipx/af_ipx.c), not to the timerfd code above.
 */
/* * Implements an IPX socket layer. * * This code is derived from work by * Ross Biro : Writing the original IP stack * Fred Van Kempen : Tidying up the TCP/IP * * Many thanks go to Keith Baker, Institute For Industrial Information * Technology Ltd, Swansea University for allowing me to work on this * in my own time even though it was in some ways related to commercial * work I am currently employed to do there. * * All the material in this file is subject to the Gnu license version 2. * Neither Alan Cox nor the Swansea University Computer Society admit * liability nor provide warranty for any of this software. This material * is provided as is and at no charge. * * Portions Copyright (c) 2000-2003 Conectiva, Inc. <acme@conectiva.com.br> * Neither Arnaldo Carvalho de Melo nor Conectiva, Inc. admit liability nor * provide warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * Portions Copyright (c) 1995 Caldera, Inc. <greg@caldera.com> * Neither Greg Page nor Caldera, Inc. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * See net/ipx/ChangeLog. 
*/

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/ipx.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/termios.h>

#include <net/ipx.h>
#include <net/p8022.h>
#include <net/psnap.h>
#include <net/sock.h>
#include <net/datalink.h>
#include <net/tcp_states.h>
#include <net/net_namespace.h>

#include <linux/uaccess.h>

/* Configuration Variables */
static unsigned char ipxcfg_max_hops = 16;	/* hop limit checked in ipxitf_send() */
static char ipxcfg_auto_select_primary;		/* set via SIOCAIPXPRISLT */
static char ipxcfg_auto_create_interfaces;	/* set via SIOCAIPXITFCRT */
int sysctl_ipx_pprop_broadcasting = 1;		/* rebroadcast 0x14 pprop packets */

/* Global Variables */
static struct datalink_proto *p8022_datalink;
static struct datalink_proto *pEII_datalink;
static struct datalink_proto *p8023_datalink;
static struct datalink_proto *pSNAP_datalink;

static const struct proto_ops ipx_dgram_ops;

/* All configured interfaces, guarded by ipx_interfaces_lock. */
LIST_HEAD(ipx_interfaces);
DEFINE_SPINLOCK(ipx_interfaces_lock);

struct ipx_interface *ipx_primary_net;
struct ipx_interface *ipx_internal_net;

/* Return the first registered interface, or NULL if none exist.
 * NOTE(review): no lock taken here — callers appear responsible for
 * holding ipx_interfaces_lock where consistency matters; confirm. */
struct ipx_interface *ipx_interfaces_head(void)
{
	struct ipx_interface *rc = NULL;

	if (!list_empty(&ipx_interfaces))
		rc = list_entry(ipx_interfaces.next,
				struct ipx_interface, node);
	return rc;
}

/* Toggle auto-selection of the primary net; when enabling with no
 * primary configured, promote the first interface on the list. */
static void ipxcfg_set_auto_select(char val)
{
	ipxcfg_auto_select_primary = val;
	if (val && !ipx_primary_net)
		ipx_primary_net = ipx_interfaces_head();
}

/* Copy the auto-create/auto-select configuration out to userspace. */
static int ipxcfg_get_config_data(struct ipx_config_data __user *arg)
{
	struct ipx_config_data vals;

	vals.ipxcfg_auto_create_interfaces = ipxcfg_auto_create_interfaces;
	vals.ipxcfg_auto_select_primary	   = ipxcfg_auto_select_primary;

	return copy_to_user(arg, &vals, sizeof(vals)) ?
-EFAULT : 0; } /* * Note: Sockets may not be removed _during_ an interrupt or inet_bh * handler using this technique. They can be added although we do not * use this facility. */ static void ipx_remove_socket(struct sock *sk) { /* Determine interface with which socket is associated */ struct ipx_interface *intrfc = ipx_sk(sk)->intrfc; if (!intrfc) goto out; ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); sk_del_node_init(sk); spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); out: return; } static void ipx_destroy_socket(struct sock *sk) { ipx_remove_socket(sk); skb_queue_purge(&sk->sk_receive_queue); sk_refcnt_debug_dec(sk); } /* * The following code is used to support IPX Interfaces (IPXITF). An * IPX interface is defined by a physical device and a frame type. */ /* ipxitf_clear_primary_net has to be called with ipx_interfaces_lock held */ static void ipxitf_clear_primary_net(void) { ipx_primary_net = NULL; if (ipxcfg_auto_select_primary) ipx_primary_net = ipx_interfaces_head(); } static struct ipx_interface *__ipxitf_find_using_phys(struct net_device *dev, __be16 datalink) { struct ipx_interface *i; list_for_each_entry(i, &ipx_interfaces, node) if (i->if_dev == dev && i->if_dlink_type == datalink) goto out; i = NULL; out: return i; } static struct ipx_interface *ipxitf_find_using_phys(struct net_device *dev, __be16 datalink) { struct ipx_interface *i; spin_lock_bh(&ipx_interfaces_lock); i = __ipxitf_find_using_phys(dev, datalink); if (i) ipxitf_hold(i); spin_unlock_bh(&ipx_interfaces_lock); return i; } struct ipx_interface *ipxitf_find_using_net(__be32 net) { struct ipx_interface *i; spin_lock_bh(&ipx_interfaces_lock); if (net) { list_for_each_entry(i, &ipx_interfaces, node) if (i->if_netnum == net) goto hold; i = NULL; goto unlock; } i = ipx_primary_net; if (i) hold: ipxitf_hold(i); unlock: spin_unlock_bh(&ipx_interfaces_lock); return i; } /* Sockets are bound to a particular IPX interface. 
*/ static void ipxitf_insert_socket(struct ipx_interface *intrfc, struct sock *sk) { ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); ipx_sk(sk)->intrfc = intrfc; sk_add_node(sk, &intrfc->if_sklist); spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); } /* caller must hold intrfc->if_sklist_lock */ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc, __be16 port) { struct sock *s; sk_for_each(s, &intrfc->if_sklist) if (ipx_sk(s)->port == port) goto found; s = NULL; found: return s; } /* caller must hold a reference to intrfc */ static struct sock *ipxitf_find_socket(struct ipx_interface *intrfc, __be16 port) { struct sock *s; spin_lock_bh(&intrfc->if_sklist_lock); s = __ipxitf_find_socket(intrfc, port); if (s) sock_hold(s); spin_unlock_bh(&intrfc->if_sklist_lock); return s; } #ifdef CONFIG_IPX_INTERN static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc, unsigned char *ipx_node, __be16 port) { struct sock *s; ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(s, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); if (ipxs->port == port && !memcmp(ipx_node, ipxs->node, IPX_NODE_LEN)) goto found; } s = NULL; found: spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); return s; } #endif static void __ipxitf_down(struct ipx_interface *intrfc) { struct sock *s; struct hlist_node *t; /* Delete all routes associated with this interface */ ipxrtr_del_routes(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); /* error sockets */ sk_for_each_safe(s, t, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); s->sk_err = ENOLINK; s->sk_error_report(s); ipxs->intrfc = NULL; ipxs->port = 0; sock_set_flag(s, SOCK_ZAPPED); /* Indicates it is no longer bound */ sk_del_node_init(s); } INIT_HLIST_HEAD(&intrfc->if_sklist); spin_unlock_bh(&intrfc->if_sklist_lock); /* remove this interface from list */ list_del(&intrfc->node); /* remove this interface from *special* networks */ if (intrfc == 
ipx_primary_net)
		ipxitf_clear_primary_net();

	if (intrfc == ipx_internal_net)
		ipx_internal_net = NULL;

	/* Tail of __ipxitf_down(): release the device and the interface. */
	if (intrfc->if_dev)
		dev_put(intrfc->if_dev);
	kfree(intrfc);
}

/* Take the interface down: routes dropped, sockets unbound, struct freed. */
void ipxitf_down(struct ipx_interface *intrfc)
{
	spin_lock_bh(&ipx_interfaces_lock);
	__ipxitf_down(intrfc);
	spin_unlock_bh(&ipx_interfaces_lock);
}

/* Drop a reference; the last put tears the interface down.
 * Caller must hold ipx_interfaces_lock (uses __ipxitf_down). */
static void __ipxitf_put(struct ipx_interface *intrfc)
{
	if (atomic_dec_and_test(&intrfc->refcnt))
		__ipxitf_down(intrfc);
}

/* netdevice notifier: on NETDEV_UP take a reference on every IPX
 * interface bound to the device, on NETDEV_DOWN drop one (possibly
 * destroying the interface).  Only init_net is handled. */
static int ipxitf_device_event(struct notifier_block *notifier,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ipx_interface *i, *tmp;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		goto out;

	spin_lock_bh(&ipx_interfaces_lock);
	list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
		if (i->if_dev == dev) {
			if (event == NETDEV_UP)
				ipxitf_hold(i);
			else
				__ipxitf_put(i);
		}
	spin_unlock_bh(&ipx_interfaces_lock);
out:
	return NOTIFY_DONE;
}

/* Module unload: drop the remaining reference of every interface. */
static __exit void ipxitf_cleanup(void)
{
	struct ipx_interface *i, *tmp;

	spin_lock_bh(&ipx_interfaces_lock);
	list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
		__ipxitf_put(i);
	spin_unlock_bh(&ipx_interfaces_lock);
}

/* Default delivery: queue the skb on the socket, or drop it on failure. */
static void ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sock, skb) < 0)
		kfree_skb(skb);
}

/*
 * On input skb->sk is NULL. Nobody is charged for the memory.
*/ /* caller must hold a reference to intrfc */ #ifdef CONFIG_IPX_INTERN static int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy) { struct ipxhdr *ipx = ipx_hdr(skb); int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, IPX_NODE_LEN); struct sock *s; int rc; spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(s, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); if (ipxs->port == ipx->ipx_dest.sock && (is_broadcast || !memcmp(ipx->ipx_dest.node, ipxs->node, IPX_NODE_LEN))) { /* We found a socket to which to send */ struct sk_buff *skb1; if (copy) { skb1 = skb_clone(skb, GFP_ATOMIC); rc = -ENOMEM; if (!skb1) goto out; } else { skb1 = skb; copy = 1; /* skb may only be used once */ } ipxitf_def_skb_handler(s, skb1); /* On an external interface, one socket can listen */ if (intrfc != ipx_internal_net) break; } } /* skb was solely for us, and we did not make a copy, so free it. */ if (!copy) kfree_skb(skb); rc = 0; out: spin_unlock_bh(&intrfc->if_sklist_lock); return rc; } #else static struct sock *ncp_connection_hack(struct ipx_interface *intrfc, struct ipxhdr *ipx) { /* The packet's target is a NCP connection handler. We want to hand it * to the correct socket directly within the kernel, so that the * mars_nwe packet distribution process does not have to do it. Here we * only care about NCP and BURST packets. * * You might call this a hack, but believe me, you do not want a * complete NCP layer in the kernel, and this is VERY fast as well. */ struct sock *sk = NULL; int connection = 0; u8 *ncphdr = (u8 *)(ipx + 1); if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22) /* NCP request */ connection = (((int) *(ncphdr + 5)) << 8) | (int) *(ncphdr + 3); else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77) /* BURST packet */ connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); if (connection) { /* Now we have to look for a special NCP connection handling * socket. 
Only these sockets have ipx_ncp_conn != 0, set by * SIOCIPXNCPCONN. */ spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(sk, &intrfc->if_sklist) if (ipx_sk(sk)->ipx_ncp_conn == connection) { sock_hold(sk); goto found; } sk = NULL; found: spin_unlock_bh(&intrfc->if_sklist_lock); } return sk; } static int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy) { struct ipxhdr *ipx = ipx_hdr(skb); struct sock *sock1 = NULL, *sock2 = NULL; struct sk_buff *skb1 = NULL, *skb2 = NULL; int rc; if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451) sock1 = ncp_connection_hack(intrfc, ipx); if (!sock1) /* No special socket found, forward the packet the normal way */ sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock); /* * We need to check if there is a primary net and if * this is addressed to one of the *SPECIAL* sockets because * these need to be propagated to the primary net. * The *SPECIAL* socket list contains: 0x452(SAP), 0x453(RIP) and * 0x456(Diagnostic). */ if (ipx_primary_net && intrfc != ipx_primary_net) { const int dsock = ntohs(ipx->ipx_dest.sock); if (dsock == 0x452 || dsock == 0x453 || dsock == 0x456) /* The appropriate thing to do here is to dup the * packet and route to the primary net interface via * ipxitf_send; however, we'll cheat and just demux it * here. */ sock2 = ipxitf_find_socket(ipx_primary_net, ipx->ipx_dest.sock); } /* * If there is nothing to do return. The kfree will cancel any charging. */ rc = 0; if (!sock1 && !sock2) { if (!copy) kfree_skb(skb); goto out; } /* * This next segment of code is a little awkward, but it sets it up * so that the appropriate number of copies of the SKB are made and * that skb1 and skb2 point to it (them) so that it (they) can be * demuxed to sock1 and/or sock2. If we are unable to make enough * copies, we do as much as is possible. */ if (copy) skb1 = skb_clone(skb, GFP_ATOMIC); else skb1 = skb; rc = -ENOMEM; if (!skb1) goto out_put; /* Do we need 2 SKBs? 
*/ if (sock1 && sock2) skb2 = skb_clone(skb1, GFP_ATOMIC); else skb2 = skb1; if (sock1) ipxitf_def_skb_handler(sock1, skb1); if (!skb2) goto out_put; if (sock2) ipxitf_def_skb_handler(sock2, skb2); rc = 0; out_put: if (sock1) sock_put(sock1); if (sock2) sock_put(sock2); out: return rc; } #endif /* CONFIG_IPX_INTERN */ static struct sk_buff *ipxitf_adjust_skbuff(struct ipx_interface *intrfc, struct sk_buff *skb) { struct sk_buff *skb2; int in_offset = (unsigned char *)ipx_hdr(skb) - skb->head; int out_offset = intrfc->if_ipx_offset; int len; /* Hopefully, most cases */ if (in_offset >= out_offset) return skb; /* Need new SKB */ len = skb->len + out_offset; skb2 = alloc_skb(len, GFP_ATOMIC); if (skb2) { skb_reserve(skb2, out_offset); skb_reset_network_header(skb2); skb_reset_transport_header(skb2); skb_put(skb2, skb->len); memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len); memcpy(skb2->cb, skb->cb, sizeof(skb->cb)); } kfree_skb(skb); return skb2; } /* caller must hold a reference to intrfc and the skb has to be unshared */ int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node) { struct ipxhdr *ipx = ipx_hdr(skb); struct net_device *dev = intrfc->if_dev; struct datalink_proto *dl = intrfc->if_dlink; char dest_node[IPX_NODE_LEN]; int send_to_wire = 1; int addr_len; ipx->ipx_tctrl = IPX_SKB_CB(skb)->ipx_tctrl; ipx->ipx_dest.net = IPX_SKB_CB(skb)->ipx_dest_net; ipx->ipx_source.net = IPX_SKB_CB(skb)->ipx_source_net; /* see if we need to include the netnum in the route list */ if (IPX_SKB_CB(skb)->last_hop.index >= 0) { __be32 *last_hop = (__be32 *)(((u8 *) skb->data) + sizeof(struct ipxhdr) + IPX_SKB_CB(skb)->last_hop.index * sizeof(__be32)); *last_hop = IPX_SKB_CB(skb)->last_hop.netnum; IPX_SKB_CB(skb)->last_hop.index = -1; } /* * We need to know how many skbuffs it will take to send out this * packet to avoid unnecessary copies. 
*/ if (!dl || !dev || dev->flags & IFF_LOOPBACK) send_to_wire = 0; /* No non looped */ /* * See if this should be demuxed to sockets on this interface * * We want to ensure the original was eaten or that we only use * up clones. */ if (ipx->ipx_dest.net == intrfc->if_netnum) { /* * To our own node, loop and free the original. * The internal net will receive on all node address. */ if (intrfc == ipx_internal_net || !memcmp(intrfc->if_node, node, IPX_NODE_LEN)) { /* Don't charge sender */ skb_orphan(skb); /* Will charge receiver */ return ipxitf_demux_socket(intrfc, skb, 0); } /* Broadcast, loop and possibly keep to send on. */ if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) { if (!send_to_wire) skb_orphan(skb); ipxitf_demux_socket(intrfc, skb, send_to_wire); if (!send_to_wire) goto out; } } /* * If the originating net is not equal to our net; this is routed * We are still charging the sender. Which is right - the driver * free will handle this fairly. */ if (ipx->ipx_source.net != intrfc->if_netnum) { /* * Unshare the buffer before modifying the count in * case it's a flood or tcpdump */ skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) goto out; if (++ipx->ipx_tctrl > ipxcfg_max_hops) send_to_wire = 0; } if (!send_to_wire) { kfree_skb(skb); goto out; } /* Determine the appropriate hardware address */ addr_len = dev->addr_len; if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) memcpy(dest_node, dev->broadcast, addr_len); else memcpy(dest_node, &(node[IPX_NODE_LEN-addr_len]), addr_len); /* Make any compensation for differing physical/data link size */ skb = ipxitf_adjust_skbuff(intrfc, skb); if (!skb) goto out; /* set up data link and physical headers */ skb->dev = dev; skb->protocol = htons(ETH_P_IPX); /* Send it out */ dl->request(dl, skb, dest_node); out: return 0; } static int ipxitf_add_local_route(struct ipx_interface *intrfc) { return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL); } static void ipxitf_discover_netnum(struct ipx_interface *intrfc, struct 
sk_buff *skb); static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb); static int ipxitf_rcv(struct ipx_interface *intrfc, struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); int rc = 0; ipxitf_hold(intrfc); /* See if we should update our network number */ if (!intrfc->if_netnum) /* net number of intrfc not known yet */ ipxitf_discover_netnum(intrfc, skb); IPX_SKB_CB(skb)->last_hop.index = -1; if (ipx->ipx_type == IPX_TYPE_PPROP) { rc = ipxitf_pprop(intrfc, skb); if (rc) goto out_free_skb; } /* local processing follows */ if (!IPX_SKB_CB(skb)->ipx_dest_net) IPX_SKB_CB(skb)->ipx_dest_net = intrfc->if_netnum; if (!IPX_SKB_CB(skb)->ipx_source_net) IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum; /* it doesn't make sense to route a pprop packet, there's no meaning * in the ipx_dest_net for such packets */ if (ipx->ipx_type != IPX_TYPE_PPROP && intrfc->if_netnum != IPX_SKB_CB(skb)->ipx_dest_net) { /* We only route point-to-point packets. */ if (skb->pkt_type == PACKET_HOST) { skb = skb_unshare(skb, GFP_ATOMIC); if (skb) rc = ipxrtr_route_skb(skb); goto out_intrfc; } goto out_free_skb; } /* see if we should keep it */ if (!memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) || !memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN)) { rc = ipxitf_demux_socket(intrfc, skb, 0); goto out_intrfc; } /* we couldn't pawn it off so unload it */ out_free_skb: kfree_skb(skb); out_intrfc: ipxitf_put(intrfc); return rc; } static void ipxitf_discover_netnum(struct ipx_interface *intrfc, struct sk_buff *skb) { const struct ipx_cb *cb = IPX_SKB_CB(skb); /* see if this is an intra packet: source_net == dest_net */ if (cb->ipx_source_net == cb->ipx_dest_net && cb->ipx_source_net) { struct ipx_interface *i = ipxitf_find_using_net(cb->ipx_source_net); /* NB: NetWare servers lie about their hop count so we * dropped the test based on it. This is the best way * to determine this is a 0 hop count packet. 
*/ if (!i) { intrfc->if_netnum = cb->ipx_source_net; ipxitf_add_local_route(intrfc); } else { printk(KERN_WARNING "IPX: Network number collision " "%lx\n %s %s and %s %s\n", (unsigned long) ntohl(cb->ipx_source_net), ipx_device_name(i), ipx_frame_name(i->if_dlink_type), ipx_device_name(intrfc), ipx_frame_name(intrfc->if_dlink_type)); ipxitf_put(i); } } } /** * ipxitf_pprop - Process packet propagation IPX packet type 0x14, used for * NetBIOS broadcasts * @intrfc: IPX interface receiving this packet * @skb: Received packet * * Checks if packet is valid: if its more than %IPX_MAX_PPROP_HOPS hops or if it * is smaller than a IPX header + the room for %IPX_MAX_PPROP_HOPS hops we drop * it, not even processing it locally, if it has exact %IPX_MAX_PPROP_HOPS we * don't broadcast it, but process it locally. See chapter 5 of Novell's "IPX * RIP and SAP Router Specification", Part Number 107-000029-001. * * If it is valid, check if we have pprop broadcasting enabled by the user, * if not, just return zero for local processing. * * If it is enabled check the packet and don't broadcast it if we have already * seen this packet. * * Broadcast: send it to the interfaces that aren't on the packet visited nets * array, just after the IPX header. * * Returns -EINVAL for invalid packets, so that the calling function drops * the packet without local processing. 0 if packet is to be locally processed. */ static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); int i, rc = -EINVAL; struct ipx_interface *ifcs; char *c; __be32 *l; /* Illegal packet - too many hops or too short */ /* We decide to throw it away: no broadcasting, no local processing. * NetBIOS unaware implementations route them as normal packets - * tctrl <= 15, any data payload... */ if (IPX_SKB_CB(skb)->ipx_tctrl > IPX_MAX_PPROP_HOPS || ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr) + IPX_MAX_PPROP_HOPS * sizeof(u32)) goto out; /* are we broadcasting this damn thing? 
*/ rc = 0; if (!sysctl_ipx_pprop_broadcasting) goto out; /* We do broadcast packet on the IPX_MAX_PPROP_HOPS hop, but we * process it locally. All previous hops broadcasted it, and process it * locally. */ if (IPX_SKB_CB(skb)->ipx_tctrl == IPX_MAX_PPROP_HOPS) goto out; c = ((u8 *) ipx) + sizeof(struct ipxhdr); l = (__be32 *) c; /* Don't broadcast packet if already seen this net */ for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++) if (*l++ == intrfc->if_netnum) goto out; /* < IPX_MAX_PPROP_HOPS hops && input interface not in list. Save the * position where we will insert recvd netnum into list, later on, * in ipxitf_send */ IPX_SKB_CB(skb)->last_hop.index = i; IPX_SKB_CB(skb)->last_hop.netnum = intrfc->if_netnum; /* xmit on all other interfaces... */ spin_lock_bh(&ipx_interfaces_lock); list_for_each_entry(ifcs, &ipx_interfaces, node) { /* Except unconfigured interfaces */ if (!ifcs->if_netnum) continue; /* That aren't in the list */ if (ifcs == intrfc) continue; l = (__be32 *) c; /* don't consider the last entry in the packet list, * it is our netnum, and it is not there yet */ for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++) if (ifcs->if_netnum == *l++) break; if (i == IPX_SKB_CB(skb)->ipx_tctrl) { struct sk_buff *s = skb_copy(skb, GFP_ATOMIC); if (s) { IPX_SKB_CB(s)->ipx_dest_net = ifcs->if_netnum; ipxrtr_route_skb(s); } } } spin_unlock_bh(&ipx_interfaces_lock); out: return rc; } static void ipxitf_insert(struct ipx_interface *intrfc) { spin_lock_bh(&ipx_interfaces_lock); list_add_tail(&intrfc->node, &ipx_interfaces); spin_unlock_bh(&ipx_interfaces_lock); if (ipxcfg_auto_select_primary && !ipx_primary_net) ipx_primary_net = intrfc; } static struct ipx_interface *ipxitf_alloc(struct net_device *dev, __be32 netnum, __be16 dlink_type, struct datalink_proto *dlink, unsigned char internal, int ipx_offset) { struct ipx_interface *intrfc = kmalloc(sizeof(*intrfc), GFP_ATOMIC); if (intrfc) { intrfc->if_dev = dev; intrfc->if_netnum = netnum; intrfc->if_dlink_type = 
dlink_type; intrfc->if_dlink = dlink; intrfc->if_internal = internal; intrfc->if_ipx_offset = ipx_offset; intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET; INIT_HLIST_HEAD(&intrfc->if_sklist); atomic_set(&intrfc->refcnt, 1); spin_lock_init(&intrfc->if_sklist_lock); } return intrfc; } static int ipxitf_create_internal(struct ipx_interface_definition *idef) { struct ipx_interface *intrfc; int rc = -EEXIST; /* Only one primary network allowed */ if (ipx_primary_net) goto out; /* Must have a valid network number */ rc = -EADDRNOTAVAIL; if (!idef->ipx_network) goto out; intrfc = ipxitf_find_using_net(idef->ipx_network); rc = -EADDRINUSE; if (intrfc) { ipxitf_put(intrfc); goto out; } intrfc = ipxitf_alloc(NULL, idef->ipx_network, 0, NULL, 1, 0); rc = -EAGAIN; if (!intrfc) goto out; memcpy((char *)&(intrfc->if_node), idef->ipx_node, IPX_NODE_LEN); ipx_internal_net = ipx_primary_net = intrfc; ipxitf_hold(intrfc); ipxitf_insert(intrfc); rc = ipxitf_add_local_route(intrfc); ipxitf_put(intrfc); out: return rc; } static __be16 ipx_map_frame_type(unsigned char type) { __be16 rc = 0; switch (type) { case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; case IPX_FRAME_8022: rc = htons(ETH_P_802_2); break; case IPX_FRAME_SNAP: rc = htons(ETH_P_SNAP); break; case IPX_FRAME_8023: rc = htons(ETH_P_802_3); break; } return rc; } static int ipxitf_create(struct ipx_interface_definition *idef) { struct net_device *dev; __be16 dlink_type = 0; struct datalink_proto *datalink = NULL; struct ipx_interface *intrfc; int rc; if (idef->ipx_special == IPX_INTERNAL) { rc = ipxitf_create_internal(idef); goto out; } rc = -EEXIST; if (idef->ipx_special == IPX_PRIMARY && ipx_primary_net) goto out; intrfc = ipxitf_find_using_net(idef->ipx_network); rc = -EADDRINUSE; if (idef->ipx_network && intrfc) { ipxitf_put(intrfc); goto out; } if (intrfc) ipxitf_put(intrfc); dev = dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) goto out; switch (idef->ipx_dlink_type) { case IPX_FRAME_8022: 
dlink_type = htons(ETH_P_802_2); datalink = p8022_datalink; break; case IPX_FRAME_ETHERII: if (dev->type != ARPHRD_IEEE802) { dlink_type = htons(ETH_P_IPX); datalink = pEII_datalink; break; } /* fall through */ case IPX_FRAME_SNAP: dlink_type = htons(ETH_P_SNAP); datalink = pSNAP_datalink; break; case IPX_FRAME_8023: dlink_type = htons(ETH_P_802_3); datalink = p8023_datalink; break; case IPX_FRAME_NONE: default: rc = -EPROTONOSUPPORT; goto out_dev; } rc = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_dev; /* Check addresses are suitable */ rc = -EINVAL; if (dev->addr_len > IPX_NODE_LEN) goto out_dev; intrfc = ipxitf_find_using_phys(dev, dlink_type); if (!intrfc) { /* Ok now create */ intrfc = ipxitf_alloc(dev, idef->ipx_network, dlink_type, datalink, 0, dev->hard_header_len + datalink->header_length); rc = -EAGAIN; if (!intrfc) goto out_dev; /* Setup primary if necessary */ if (idef->ipx_special == IPX_PRIMARY) ipx_primary_net = intrfc; if (!memcmp(idef->ipx_node, "\000\000\000\000\000\000", IPX_NODE_LEN)) { memset(intrfc->if_node, 0, IPX_NODE_LEN); memcpy(intrfc->if_node + IPX_NODE_LEN - dev->addr_len, dev->dev_addr, dev->addr_len); } else memcpy(intrfc->if_node, idef->ipx_node, IPX_NODE_LEN); ipxitf_hold(intrfc); ipxitf_insert(intrfc); } /* If the network number is known, add a route */ rc = 0; if (!intrfc->if_netnum) goto out_intrfc; rc = ipxitf_add_local_route(intrfc); out_intrfc: ipxitf_put(intrfc); goto out; out_dev: dev_put(dev); out: return rc; } static int ipxitf_delete(struct ipx_interface_definition *idef) { struct net_device *dev = NULL; __be16 dlink_type = 0; struct ipx_interface *intrfc; int rc = 0; spin_lock_bh(&ipx_interfaces_lock); if (idef->ipx_special == IPX_INTERNAL) { if (ipx_internal_net) { __ipxitf_put(ipx_internal_net); goto out; } rc = -ENOENT; goto out; } dlink_type = ipx_map_frame_type(idef->ipx_dlink_type); rc = -EPROTONOSUPPORT; if (!dlink_type) goto out; dev = __dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) 
goto out; intrfc = __ipxitf_find_using_phys(dev, dlink_type); rc = -EINVAL; if (!intrfc) goto out; __ipxitf_put(intrfc); rc = 0; out: spin_unlock_bh(&ipx_interfaces_lock); return rc; } static struct ipx_interface *ipxitf_auto_create(struct net_device *dev, __be16 dlink_type) { struct ipx_interface *intrfc = NULL; struct datalink_proto *datalink; if (!dev) goto out; /* Check addresses are suitable */ if (dev->addr_len > IPX_NODE_LEN) goto out; switch (ntohs(dlink_type)) { case ETH_P_IPX: datalink = pEII_datalink; break; case ETH_P_802_2: datalink = p8022_datalink; break; case ETH_P_SNAP: datalink = pSNAP_datalink; break; case ETH_P_802_3: datalink = p8023_datalink; break; default: goto out; } intrfc = ipxitf_alloc(dev, 0, dlink_type, datalink, 0, dev->hard_header_len + datalink->header_length); if (intrfc) { memset(intrfc->if_node, 0, IPX_NODE_LEN); memcpy((char *)&(intrfc->if_node[IPX_NODE_LEN-dev->addr_len]), dev->dev_addr, dev->addr_len); spin_lock_init(&intrfc->if_sklist_lock); atomic_set(&intrfc->refcnt, 1); ipxitf_insert(intrfc); dev_hold(dev); } out: return intrfc; } static int ipxitf_ioctl(unsigned int cmd, void __user *arg) { int rc = -EINVAL; struct ifreq ifr; int val; switch (cmd) { case SIOCSIFADDR: { struct sockaddr_ipx *sipx; struct ipx_interface_definition f; rc = -EFAULT; if (copy_from_user(&ifr, arg, sizeof(ifr))) break; sipx = (struct sockaddr_ipx *)&ifr.ifr_addr; rc = -EINVAL; if (sipx->sipx_family != AF_IPX) break; f.ipx_network = sipx->sipx_network; memcpy(f.ipx_device, ifr.ifr_name, sizeof(f.ipx_device)); memcpy(f.ipx_node, sipx->sipx_node, IPX_NODE_LEN); f.ipx_dlink_type = sipx->sipx_type; f.ipx_special = sipx->sipx_special; if (sipx->sipx_action == IPX_DLTITF) rc = ipxitf_delete(&f); else rc = ipxitf_create(&f); break; } case SIOCGIFADDR: { struct sockaddr_ipx *sipx; struct ipx_interface *ipxif; struct net_device *dev; rc = -EFAULT; if (copy_from_user(&ifr, arg, sizeof(ifr))) break; sipx = (struct sockaddr_ipx *)&ifr.ifr_addr; dev = 
__dev_get_by_name(&init_net, ifr.ifr_name); rc = -ENODEV; if (!dev) break; ipxif = ipxitf_find_using_phys(dev, ipx_map_frame_type(sipx->sipx_type)); rc = -EADDRNOTAVAIL; if (!ipxif) break; sipx->sipx_family = AF_IPX; sipx->sipx_network = ipxif->if_netnum; memcpy(sipx->sipx_node, ipxif->if_node, sizeof(sipx->sipx_node)); rc = 0; if (copy_to_user(arg, &ifr, sizeof(ifr))) rc = -EFAULT; ipxitf_put(ipxif); break; } case SIOCAIPXITFCRT: rc = -EFAULT; if (get_user(val, (unsigned char __user *) arg)) break; rc = 0; ipxcfg_auto_create_interfaces = val; break; case SIOCAIPXPRISLT: rc = -EFAULT; if (get_user(val, (unsigned char __user *) arg)) break; rc = 0; ipxcfg_set_auto_select(val); break; } return rc; } /* * Checksum routine for IPX */ /* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */ /* This functions should *not* mess with packet contents */ __be16 ipx_cksum(struct ipxhdr *packet, int length) { /* * NOTE: sum is a net byte order quantity, which optimizes the * loop. This only works on big and little endian machines. (I * don't know of a machine that isn't.) */ /* handle the first 3 words separately; checksum should be skipped * and ipx_tctrl masked out */ __u16 *p = (__u16 *)packet; __u32 sum = p[1] + (p[2] & (__force u16)htons(0x00ff)); __u32 i = (length >> 1) - 3; /* Number of remaining complete words */ /* Loop through them */ p += 3; while (i--) sum += *p++; /* Add on the last part word if it exists */ if (packet->ipx_pktsize & htons(1)) sum += (__force u16)htons(0xff00) & *p; /* Do final fixup */ sum = (sum & 0xffff) + (sum >> 16); /* It's a pity there's no concept of carry in C */ if (sum >= 0x10000) sum++; /* * Leave 0 alone; we don't want 0xffff here. 
Note that we can't get * here with 0x10000, so this check is the same as ((__u16)sum) */ if (sum) sum = ~sum; return (__force __be16)sum; } const char *ipx_frame_name(__be16 frame) { char* rc = "None"; switch (ntohs(frame)) { case ETH_P_IPX: rc = "EtherII"; break; case ETH_P_802_2: rc = "802.2"; break; case ETH_P_SNAP: rc = "SNAP"; break; case ETH_P_802_3: rc = "802.3"; break; } return rc; } const char *ipx_device_name(struct ipx_interface *intrfc) { return intrfc->if_internal ? "Internal" : intrfc->if_dev ? intrfc->if_dev->name : "Unknown"; } /* Handling for system calls applied via the various interfaces to an IPX * socket object. */ static int ipx_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int opt; int rc = -EINVAL; lock_sock(sk); if (optlen != sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (unsigned int __user *)optval)) goto out; rc = -ENOPROTOOPT; if (!(level == SOL_IPX && optname == IPX_TYPE)) goto out; ipx_sk(sk)->type = opt; rc = 0; out: release_sock(sk); return rc; } static int ipx_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val = 0; int len; int rc = -ENOPROTOOPT; lock_sock(sk); if (!(level == SOL_IPX && optname == IPX_TYPE)) goto out; val = ipx_sk(sk)->type; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if(len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &val, len)) goto out; rc = 0; out: release_sock(sk); return rc; } static struct proto ipx_proto = { .name = "IPX", .owner = THIS_MODULE, .obj_size = sizeof(struct ipx_sock), }; static int ipx_create(struct net *net, struct socket *sock, int protocol, int kern) { int rc = -ESOCKTNOSUPPORT; struct sock *sk; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; /* * SPX support is not anymore in the kernel sources. 
* If you want to resurrect it, completing it and making it understand
 * shared skbs, be fully multithreaded, etc, grab the sources in an
 * early 2.5 kernel tree.
 */
	if (sock->type != SOCK_DGRAM)
		goto out;

	rc = -ENOMEM;
	sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, kern);
	if (!sk)
		goto out;

	sk_refcnt_debug_inc(sk);
	sock_init_data(sock, sk);
	sk->sk_no_check_tx = 1;		/* Checksum off by default */
	sock->ops = &ipx_dgram_ops;
	rc = 0;
out:
	return rc;
}

/* Release an IPX socket: mark it dead, detach it from the struct socket
 * and drop the final reference. */
static int ipx_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		goto out;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;
	sk_refcnt_debug_release(sk);
	ipx_destroy_socket(sk);
	release_sock(sk);
	sock_put(sk);
out:
	return 0;
}

/* caller must hold a reference to intrfc */
/* Pick the next free ephemeral socket number on @intrfc: start the scan
 * at if_sknum, wrap back to IPX_MIN_EPHEMERAL_SOCKET past the maximum.
 * NOTE(review): if every ephemeral number were in use this loop would
 * not terminate - presumably that cannot happen in practice; verify. */
static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc)
{
	unsigned short socketNum = intrfc->if_sknum;

	spin_lock_bh(&intrfc->if_sklist_lock);

	if (socketNum < IPX_MIN_EPHEMERAL_SOCKET)
		socketNum = IPX_MIN_EPHEMERAL_SOCKET;

	while (__ipxitf_find_socket(intrfc, htons(socketNum)))
		if (socketNum > IPX_MAX_EPHEMERAL_SOCKET)
			socketNum = IPX_MIN_EPHEMERAL_SOCKET;
		else
			socketNum++;

	spin_unlock_bh(&intrfc->if_sklist_lock);
	intrfc->if_sknum = socketNum;	/* remember where the scan stopped */

	return htons(socketNum);
}

/* Bind helper, called with the socket lock held.  Resolves the target
 * interface, autobinds a free ephemeral port when sipx_port is 0, and
 * (continuing on the next source line) requires CAP_NET_ADMIN for
 * well-known, non-ephemeral port numbers. */
static int __ipx_bind(struct socket *sock,
			struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct ipx_sock *ipxs = ipx_sk(sk);
	struct ipx_interface *intrfc;
	struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr;
	int rc = -EINVAL;

	/* Only an unbound (ZAPPED) socket with a full sockaddr may bind. */
	if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_ipx))
		goto out;

	intrfc = ipxitf_find_using_net(addr->sipx_network);
	rc = -EADDRNOTAVAIL;
	if (!intrfc)
		goto out;

	if (!addr->sipx_port) {
		addr->sipx_port = ipx_first_free_socketnum(intrfc);
		rc = -EINVAL;
		if (!addr->sipx_port)
			goto out_put;
	}

	/* protect IPX system stuff like routing/sap */
	rc = -EACCES;
	if (ntohs(addr->sipx_port) <
IPX_MIN_EPHEMERAL_SOCKET && !capable(CAP_NET_ADMIN)) goto out_put; ipxs->port = addr->sipx_port; #ifdef CONFIG_IPX_INTERN if (intrfc == ipx_internal_net) { /* The source address is to be set explicitly if the * socket is to be bound on the internal network. If a * node number 0 was specified, the default is used. */ rc = -EINVAL; if (!memcmp(addr->sipx_node, ipx_broadcast_node, IPX_NODE_LEN)) goto out_put; if (!memcmp(addr->sipx_node, ipx_this_node, IPX_NODE_LEN)) memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN); else memcpy(ipxs->node, addr->sipx_node, IPX_NODE_LEN); rc = -EADDRINUSE; if (ipxitf_find_internal_socket(intrfc, ipxs->node, ipxs->port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs(addr->sipx_port)); goto out_put; } } else { /* Source addresses are easy. It must be our * network:node pair for an interface routed to IPX * with the ipx routing ioctl() */ memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN); rc = -EADDRINUSE; if (ipxitf_find_socket(intrfc, addr->sipx_port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs(addr->sipx_port)); goto out_put; } } #else /* !def CONFIG_IPX_INTERN */ /* Source addresses are easy. 
It must be our network:node pair for an interface routed to IPX with the ipx routing ioctl() */ rc = -EADDRINUSE; if (ipxitf_find_socket(intrfc, addr->sipx_port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs((int)addr->sipx_port)); goto out_put; } #endif /* CONFIG_IPX_INTERN */ ipxitf_insert_socket(intrfc, sk); sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; out_put: ipxitf_put(intrfc); out: return rc; } static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; int rc; lock_sock(sk); rc = __ipx_bind(sock, uaddr, addr_len); release_sock(sk); return rc; } static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *addr; int rc = -EINVAL; struct ipx_route *rt; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; lock_sock(sk); if (addr_len != sizeof(*addr)) goto out; addr = (struct sockaddr_ipx *)uaddr; /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } /* We can either connect to primary network or somewhere * we can route to */ rt = ipxrtr_lookup(addr->sipx_network); rc = -ENETUNREACH; if (!rt && !(!addr->sipx_network && ipx_primary_net)) goto out; ipxs->dest_addr.net = addr->sipx_network; ipxs->dest_addr.sock = addr->sipx_port; memcpy(ipxs->dest_addr.node, addr->sipx_node, IPX_NODE_LEN); ipxs->type = addr->sipx_type; if (sock->type == SOCK_DGRAM) { sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; } if (rt) ipxrtr_put(rt); rc = 0; out: release_sock(sk); return rc; } static int ipx_getname(struct socket *sock, 
struct sockaddr *uaddr, int *uaddr_len, int peer) { struct ipx_address *addr; struct sockaddr_ipx sipx; struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); int rc; *uaddr_len = sizeof(struct sockaddr_ipx); lock_sock(sk); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; addr = &ipxs->dest_addr; sipx.sipx_network = addr->net; sipx.sipx_port = addr->sock; memcpy(sipx.sipx_node, addr->node, IPX_NODE_LEN); } else { if (ipxs->intrfc) { sipx.sipx_network = ipxs->intrfc->if_netnum; #ifdef CONFIG_IPX_INTERN memcpy(sipx.sipx_node, ipxs->node, IPX_NODE_LEN); #else memcpy(sipx.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ } else { sipx.sipx_network = 0; memset(sipx.sipx_node, '\0', IPX_NODE_LEN); } sipx.sipx_port = ipxs->port; } sipx.sipx_family = AF_IPX; sipx.sipx_type = ipxs->type; sipx.sipx_zero = 0; memcpy(uaddr, &sipx, sizeof(sipx)); rc = 0; out: release_sock(sk); return rc; } static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { /* NULL here for pt means the packet was looped back */ struct ipx_interface *intrfc; struct ipxhdr *ipx; u16 ipx_pktsize; int rc = 0; if (!net_eq(dev_net(dev), &init_net)) goto drop; /* Not ours */ if (skb->pkt_type == PACKET_OTHERHOST) goto drop; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out; if (!pskb_may_pull(skb, sizeof(struct ipxhdr))) goto drop; ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize); /* Too small or invalid header? 
*/ if (ipx_pktsize < sizeof(struct ipxhdr) || !pskb_may_pull(skb, ipx_pktsize)) goto drop; ipx = ipx_hdr(skb); if (ipx->ipx_checksum != IPX_NO_CHECKSUM && ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) goto drop; IPX_SKB_CB(skb)->ipx_tctrl = ipx->ipx_tctrl; IPX_SKB_CB(skb)->ipx_dest_net = ipx->ipx_dest.net; IPX_SKB_CB(skb)->ipx_source_net = ipx->ipx_source.net; /* Determine what local ipx endpoint this is */ intrfc = ipxitf_find_using_phys(dev, pt->type); if (!intrfc) { if (ipxcfg_auto_create_interfaces && IPX_SKB_CB(skb)->ipx_dest_net) { intrfc = ipxitf_auto_create(dev, pt->type); if (intrfc) ipxitf_hold(intrfc); } if (!intrfc) /* Not one of ours */ /* or invalid packet for auto creation */ goto drop; } rc = ipxitf_rcv(intrfc, skb); ipxitf_put(intrfc); goto out; drop: kfree_skb(skb); out: return rc; } static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ipx *, usipx, msg->msg_name); struct sockaddr_ipx local_sipx; int rc = -EINVAL; int flags = msg->msg_flags; lock_sock(sk); /* Socket gets bound below anyway */ /* if (sk->sk_zapped) return -EIO; */ /* Socket not bound */ if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) goto out; /* Max possible packet size limited by 16 bit pktsize in header */ if (len >= 65535 - sizeof(struct ipxhdr)) goto out; if (usipx) { if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -EINVAL; if (msg->msg_namelen < sizeof(*usipx) || usipx->sipx_family != AF_IPX) goto out; } else { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; usipx = &local_sipx; usipx->sipx_family = AF_IPX; usipx->sipx_type = 
ipxs->type; usipx->sipx_port = ipxs->dest_addr.sock; usipx->sipx_network = ipxs->dest_addr.net; memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN); } rc = ipxrtr_route_packet(sk, usipx, msg, len, flags & MSG_DONTWAIT); if (rc >= 0) rc = len; out: release_sock(sk); return rc; } static int ipx_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ipx *, sipx, msg->msg_name); struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; bool locked = true; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; release_sock(sk); locked = false; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) { if (rc == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN)) rc = 0; goto out; } ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied); if (rc) goto out_free; if (skb->tstamp) sk->sk_stamp = skb->tstamp; if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; msg->msg_namelen = sizeof(*sipx); } rc = copied; out_free: skb_free_datagram(sk, skb); out: if (locked) release_sock(sk); return rc; } static int ipx_ioctl(struct socket *sock, unsigned int cmd, 
unsigned long arg)
{
	int rc = 0;
	long amount = 0;
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *)arg;

	lock_sock(sk);
	switch (cmd) {
	case TIOCOUTQ:
		/* Report remaining send-buffer space, clamped at 0. */
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		rc = put_user(amount, (int __user *)argp);
		break;
	case TIOCINQ: {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
		/* These two are safe on a single CPU system as only
		 * user tasks fiddle here */
		if (skb)
			amount = skb->len - sizeof(struct ipxhdr);
		rc = put_user(amount, (int __user *)argp);
		break;
	}
	case SIOCADDRT:
	case SIOCDELRT:
		/* Routing table changes are privileged. */
		rc = -EPERM;
		if (capable(CAP_NET_ADMIN))
			rc = ipxrtr_ioctl(cmd, argp);
		break;
	case SIOCSIFADDR:
	case SIOCAIPXITFCRT:
	case SIOCAIPXPRISLT:
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		/* fall through - privileged commands share the interface
		 * handler with the unprivileged SIOCGIFADDR below */
	case SIOCGIFADDR:
		rc = ipxitf_ioctl(cmd, argp);
		break;
	case SIOCIPXCFGDATA:
		rc = ipxcfg_get_config_data(argp);
		break;
	case SIOCIPXNCPCONN:
		/*
		 * This socket wants to take care of the NCP connection
		 * handed to us in arg.
		 */
		rc = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		rc = get_user(ipx_sk(sk)->ipx_ncp_conn,
			      (const unsigned short __user *)argp);
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* Address-family concepts IPX does not have. */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	release_sock(sk);

	return rc;
}

#ifdef CONFIG_COMPAT
static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	/*
	 * These 4 commands use same structure on 32bit and 64bit.  Rest of IPX
	 * commands is handled by generic ioctl code.  As these commands are
	 * SIOCPROTOPRIVATE..SIOCPROTOPRIVATE+3, they cannot be handled by
	 * generic code.
*/ switch (cmd) { case SIOCAIPXITFCRT: case SIOCAIPXPRISLT: case SIOCIPXCFGDATA: case SIOCIPXNCPCONN: return ipx_ioctl(sock, cmd, arg); default: return -ENOIOCTLCMD; } } #endif static int ipx_shutdown(struct socket *sock, int mode) { struct sock *sk = sock->sk; if (mode < SHUT_RD || mode > SHUT_RDWR) return -EINVAL; /* This maps: * SHUT_RD (0) -> RCV_SHUTDOWN (1) * SHUT_WR (1) -> SEND_SHUTDOWN (2) * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) */ ++mode; lock_sock(sk); sk->sk_shutdown |= mode; release_sock(sk); sk->sk_state_change(sk); return 0; } /* * Socket family declarations */ static const struct net_proto_family ipx_family_ops = { .family = PF_IPX, .create = ipx_create, .owner = THIS_MODULE, }; static const struct proto_ops ipx_dgram_ops = { .family = PF_IPX, .owner = THIS_MODULE, .release = ipx_release, .bind = ipx_bind, .connect = ipx_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = ipx_getname, .poll = datagram_poll, .ioctl = ipx_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ipx_compat_ioctl, #endif .listen = sock_no_listen, .shutdown = ipx_shutdown, .setsockopt = ipx_setsockopt, .getsockopt = ipx_getsockopt, .sendmsg = ipx_sendmsg, .recvmsg = ipx_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type ipx_8023_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_802_3), .func = ipx_rcv, }; static struct packet_type ipx_dix_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IPX), .func = ipx_rcv, }; static struct notifier_block ipx_dev_notifier = { .notifier_call = ipxitf_device_event, }; static const unsigned char ipx_8022_type = 0xE0; static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; static const char ipx_EII_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with Ethernet II\n"; static const char ipx_8023_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with 802.3\n"; static const char ipx_llc_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register 
with 802.2\n";
static const char ipx_snap_err_msg[] __initconst =
	KERN_CRIT "IPX: Unable to register with SNAP\n";

/*
 * Module init: register the IPX protocol and socket family, then attach
 * to the four supported framings (Ethernet II, 802.3, 802.2/LLC, SNAP)
 * and hook up the netdevice notifier, sysctl and procfs entries.  A
 * missing datalink client is only logged - the module still loads.
 */
static int __init ipx_init(void)
{
	int rc = proto_register(&ipx_proto, 1);

	if (rc != 0)
		goto out;

	sock_register(&ipx_family_ops);

	pEII_datalink = make_EII_client();
	if (pEII_datalink)
		dev_add_pack(&ipx_dix_packet_type);
	else
		printk(ipx_EII_err_msg);

	p8023_datalink = make_8023_client();
	if (p8023_datalink)
		dev_add_pack(&ipx_8023_packet_type);
	else
		printk(ipx_8023_err_msg);

	p8022_datalink = register_8022_client(ipx_8022_type, ipx_rcv);
	if (!p8022_datalink)
		printk(ipx_llc_err_msg);

	pSNAP_datalink = register_snap_client(ipx_snap_id, ipx_rcv);
	if (!pSNAP_datalink)
		printk(ipx_snap_err_msg);

	register_netdevice_notifier(&ipx_dev_notifier);
	ipx_register_sysctl();
	ipx_proc_init();
out:
	return rc;
}

/* Module exit: tear everything down in roughly the reverse order of
 * ipx_init(), nulling each datalink pointer after unregistering it. */
static void __exit ipx_proto_finito(void)
{
	ipx_proc_exit();
	ipx_unregister_sysctl();

	unregister_netdevice_notifier(&ipx_dev_notifier);
	ipxitf_cleanup();

	if (pSNAP_datalink) {
		unregister_snap_client(pSNAP_datalink);
		pSNAP_datalink = NULL;
	}

	if (p8022_datalink) {
		unregister_8022_client(p8022_datalink);
		p8022_datalink = NULL;
	}

	dev_remove_pack(&ipx_8023_packet_type);
	if (p8023_datalink) {
		destroy_8023_client(p8023_datalink);
		p8023_datalink = NULL;
	}

	dev_remove_pack(&ipx_dix_packet_type);
	if (pEII_datalink) {
		destroy_EII_client(pEII_datalink);
		pEII_datalink = NULL;
	}

	proto_unregister(&ipx_proto);
	sock_unregister(ipx_family_ops.family);
}

module_init(ipx_init);
module_exit(ipx_proto_finito);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IPX);
/* dataset provenance marker (non-code): ./CrossVul/dataset_final_sorted/CWE-416/c/good_3269_0 */
/* dataset provenance marker (non-code): crossvul-cpp_data_good_820_1
 * - the text below is a different source file (fs/pipe.c) */
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/pipe.c * * Copyright (C) 1991, 1992, 1999 Linus Torvalds */ #include <linux/mm.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/log2.h> #include <linux/mount.h> #include <linux/magic.h> #include <linux/pipe_fs_i.h> #include <linux/uio.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/audit.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <linux/memcontrol.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #include "internal.h" /* * The max size that a non-root user is allowed to grow the pipe. Can * be set by root in /proc/sys/fs/pipe-max-size */ unsigned int pipe_max_size = 1048576; /* Maximum allocatable pages per user. Hard limit is unset by default, soft * matches default values. */ unsigned long pipe_user_pages_hard; unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR; /* * We use a start+len construction, which provides full use of the * allocated memory. * -- Florian Coosmann (FGC) * * Reads with count = 0 should always return 0. * -- Julian Bradfield 1999-06-07. * * FIFOs and Pipes now generate SIGIO for both readers and writers. 
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

/* Take pipe->mutex with an explicit lockdep subclass; the lock is
 * skipped while the pipe has no files attached (pipe->files == 0). */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

/* Unconditional lock/unlock of pipe->mutex (no pipe->files check). */
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

/* Lock two distinct pipes in address order, so concurrent callers
 * locking the same pair in opposite argument order cannot deadlock.
 * (Callers are outside this chunk - presumably splice/tee paths.) */
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

/* Release an anonymous pipe buffer's page, caching it one-deep in
 * pipe->tmp_page when we hold the only reference. */
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache.
(Otherwise just release our reference to it) */ if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else put_page(page); } static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; if (page_count(page) == 1) { memcg_kmem_uncharge(page, 0); __SetPageLocked(page); return 0; } return 1; } /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal * * Description: * This function attempts to steal the &struct page attached to * @buf. If successful, this function returns 0 and returns with * the page locked. The caller may then reuse the page for whatever * he wishes; the typical use is insertion into a different file * page cache. */ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct page *page = buf->page; /* * A reference of one is golden, that means that the owner of this * page is the only one holding a reference to it. lock the page * and return OK. */ if (page_count(page) == 1) { lock_page(page); return 0; } return 1; } EXPORT_SYMBOL(generic_pipe_buf_steal); /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to * * Description: * This function grabs an extra reference to @buf. It's used in * in the tee() system call, when we duplicate the buffers in one * pipe into another. */ bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return try_get_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_get); /** * generic_pipe_buf_confirm - verify contents of the pipe buffer * @info: the pipe that the buffer belongs to * @buf: the buffer to confirm * * Description: * This function does nothing, because the generic pipe code uses * pages that are always good when inserted into the pipe. 
*/ int generic_pipe_buf_confirm(struct pipe_inode_info *info, struct pipe_buffer *buf) { return 0; } EXPORT_SYMBOL(generic_pipe_buf_confirm); /** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to put a reference to * * Description: * This function releases a reference to @buf. */ void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { put_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_release); /* New data written to a pipe may be appended to a buffer with this type. */ static const struct pipe_buf_operations anon_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = { .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; static const struct pipe_buf_operations packet_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; /** * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable * @buf: the buffer to mark * * Description: * This function ensures that no future writes will be merged into the * given &struct pipe_buffer. This is necessary when multiple pipe buffers * share the same backing page. */ void pipe_buf_mark_unmergeable(struct pipe_buffer *buf) { if (buf->ops == &anon_pipe_buf_ops) buf->ops = &anon_pipe_buf_nomerge_ops; } static bool pipe_buf_can_merge(struct pipe_buffer *buf) { return buf->ops == &anon_pipe_buf_ops; } static ssize_t pipe_read(struct kiocb *iocb, struct iov_iter *to) { size_t total_len = iov_iter_count(to); struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; int do_wakeup; ssize_t ret; /* Null read succeeds. 
*/ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; __pipe_lock(pipe); for (;;) { int bufs = pipe->nrbufs; if (bufs) { int curbuf = pipe->curbuf; struct pipe_buffer *buf = pipe->bufs + curbuf; size_t chars = buf->len; size_t written; int error; if (chars > total_len) chars = total_len; error = pipe_buf_confirm(pipe, buf); if (error) { if (!ret) ret = error; break; } written = copy_page_to_iter(buf->page, buf->offset, chars, to); if (unlikely(written < chars)) { if (!ret) ret = -EFAULT; break; } ret += chars; buf->offset += chars; buf->len -= chars; /* Was it a packet buffer? Clean up and exit */ if (buf->flags & PIPE_BUF_FLAG_PACKET) { total_len = chars; buf->len = 0; } if (!buf->len) { pipe_buf_release(pipe, buf); curbuf = (curbuf + 1) & (pipe->buffers - 1); pipe->curbuf = curbuf; pipe->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? */ continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); } __pipe_unlock(pipe); /* Signal writers asynchronously that there is more room. 
*/ if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; } static inline int is_packetized(struct file *file) { return (file->f_flags & O_DIRECT) != 0; } static ssize_t pipe_write(struct kiocb *iocb, struct iov_iter *from) { struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; ssize_t ret = 0; int do_wakeup = 0; size_t total_len = iov_iter_count(from); ssize_t chars; /* Null write succeeds. */ if (unlikely(total_len == 0)) return 0; __pipe_lock(pipe); if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } /* We try to merge small writes */ chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ if (pipe->nrbufs && chars != 0) { int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + lastbuf; int offset = buf->offset + buf->len; if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) { ret = pipe_buf_confirm(pipe, buf); if (ret) goto out; ret = copy_page_from_iter(buf->page, offset, chars, from); if (unlikely(ret < chars)) { ret = -EFAULT; goto out; } do_wakeup = 1; buf->len += ret; if (!iov_iter_count(from)) goto out; } } for (;;) { int bufs; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } bufs = pipe->nrbufs; if (bufs < pipe->buffers) { int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1); struct pipe_buffer *buf = pipe->bufs + newbuf; struct page *page = pipe->tmp_page; int copied; if (!page) { page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT); if (unlikely(!page)) { ret = ret ? : -ENOMEM; break; } pipe->tmp_page = page; } /* Always wake up, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. * FIXME! Is this really true? 
*/ do_wakeup = 1; copied = copy_page_from_iter(page, 0, PAGE_SIZE, from); if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) { if (!ret) ret = -EFAULT; break; } ret += copied; /* Insert it into the buffer array */ buf->page = page; buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = copied; buf->flags = 0; if (is_packetized(filp)) { buf->ops = &packet_pipe_buf_ops; buf->flags = PIPE_BUF_FLAG_PACKET; } pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; if (!iov_iter_count(from)) break; } if (bufs < pipe->buffers) continue; if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } out: __pipe_unlock(pipe); if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { int err = file_update_time(filp); if (err) ret = err; sb_end_write(file_inode(filp)->i_sb); } return ret; } static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe = filp->private_data; int count, buf, nrbufs; switch (cmd) { case FIONREAD: __pipe_lock(pipe); count = 0; buf = pipe->curbuf; nrbufs = pipe->nrbufs; while (--nrbufs >= 0) { count += pipe->bufs[buf].len; buf = (buf+1) & (pipe->buffers - 1); } __pipe_unlock(pipe); return put_user(count, (int __user *)arg); default: return -ENOIOCTLCMD; } } /* No kernel lock held - fine */ static __poll_t pipe_poll(struct file *filp, poll_table *wait) { __poll_t mask; struct pipe_inode_info *pipe = filp->private_data; int nrbufs; poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. 
*/ nrbufs = pipe->nrbufs; mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) mask |= EPOLLHUP; } if (filp->f_mode & FMODE_WRITE) { mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0; /* * Most Unices do not set EPOLLERR for FIFOs but on Linux they * behave exactly like pipes for poll(). */ if (!pipe->readers) mask |= EPOLLERR; } return mask; } static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe) { int kill = 0; spin_lock(&inode->i_lock); if (!--pipe->files) { inode->i_pipe = NULL; kill = 1; } spin_unlock(&inode->i_lock); if (kill) free_pipe_info(pipe); } static int pipe_release(struct inode *inode, struct file *file) { struct pipe_inode_info *pipe = file->private_data; __pipe_lock(pipe); if (file->f_mode & FMODE_READ) pipe->readers--; if (file->f_mode & FMODE_WRITE) pipe->writers--; if (pipe->readers || pipe->writers) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } __pipe_unlock(pipe); put_pipe_info(inode, pipe); return 0; } static int pipe_fasync(int fd, struct file *filp, int on) { struct pipe_inode_info *pipe = filp->private_data; int retval = 0; __pipe_lock(pipe); if (filp->f_mode & FMODE_READ) retval = fasync_helper(fd, filp, on, &pipe->fasync_readers); if ((filp->f_mode & FMODE_WRITE) && retval >= 0) { retval = fasync_helper(fd, filp, on, &pipe->fasync_writers); if (retval < 0 && (filp->f_mode & FMODE_READ)) /* this can happen only if on == T */ fasync_helper(-1, filp, 0, &pipe->fasync_readers); } __pipe_unlock(pipe); return retval; } static unsigned long account_pipe_buffers(struct user_struct *user, unsigned long old, unsigned long new) { return atomic_long_add_return(new - old, &user->pipe_bufs); } static bool too_many_pipe_buffers_soft(unsigned long 
user_bufs) { unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft); return soft_limit && user_bufs > soft_limit; } static bool too_many_pipe_buffers_hard(unsigned long user_bufs) { unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard); return hard_limit && user_bufs > hard_limit; } static bool is_unprivileged_user(void) { return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); } struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; unsigned long pipe_bufs = PIPE_DEF_BUFFERS; struct user_struct *user = get_current_user(); unsigned long user_bufs; unsigned int max_size = READ_ONCE(pipe_max_size); pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); if (pipe == NULL) goto out_free_uid; if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE)) pipe_bufs = max_size >> PAGE_SHIFT; user_bufs = account_pipe_buffers(user, 0, pipe_bufs); if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { user_bufs = account_pipe_buffers(user, pipe_bufs, 1); pipe_bufs = 1; } if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) goto out_revert_acct; pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), GFP_KERNEL_ACCOUNT); if (pipe->bufs) { init_waitqueue_head(&pipe->wait); pipe->r_counter = pipe->w_counter = 1; pipe->buffers = pipe_bufs; pipe->user = user; mutex_init(&pipe->mutex); return pipe; } out_revert_acct: (void) account_pipe_buffers(user, pipe_bufs, 0); kfree(pipe); out_free_uid: free_uid(user); return NULL; } void free_pipe_info(struct pipe_inode_info *pipe) { int i; (void) account_pipe_buffers(pipe->user, pipe->buffers, 0); free_uid(pipe->user); for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) pipe_buf_release(pipe, buf); } if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe->bufs); kfree(pipe); } static struct vfsmount *pipe_mnt __read_mostly; /* * pipefs_dname() is called from d_path(). 
*/ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]", d_inode(dentry)->i_ino); } static const struct dentry_operations pipefs_dentry_operations = { .d_dname = pipefs_dname, }; static struct inode * get_pipe_inode(void) { struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb); struct pipe_inode_info *pipe; if (!inode) goto fail_inode; inode->i_ino = get_next_ino(); pipe = alloc_pipe_info(); if (!pipe) goto fail_iput; inode->i_pipe = pipe; pipe->files = 2; pipe->readers = pipe->writers = 1; inode->i_fop = &pipefifo_fops; /* * Mark the inode dirty from the very beginning, * that way it will never be moved to the dirty * list because "mark_inode_dirty()" will think * that it already _is_ on the dirty list. */ inode->i_state = I_DIRTY; inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); return inode; fail_iput: iput(inode); fail_inode: return NULL; } int create_pipe_files(struct file **res, int flags) { struct inode *inode = get_pipe_inode(); struct file *f; if (!inode) return -ENFILE; f = alloc_file_pseudo(inode, pipe_mnt, "", O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)), &pipefifo_fops); if (IS_ERR(f)) { free_pipe_info(inode->i_pipe); iput(inode); return PTR_ERR(f); } f->private_data = inode->i_pipe; res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK), &pipefifo_fops); if (IS_ERR(res[0])) { put_pipe_info(inode, inode->i_pipe); fput(f); return PTR_ERR(res[0]); } res[0]->private_data = inode->i_pipe; res[1] = f; return 0; } static int __do_pipe_flags(int *fd, struct file **files, int flags) { int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; error = create_pipe_files(files, flags); if (error) return error; error = get_unused_fd_flags(flags); if (error < 0) goto err_read_pipe; fdr = error; error = 
get_unused_fd_flags(flags); if (error < 0) goto err_fdr; fdw = error; audit_fd_pair(fdr, fdw); fd[0] = fdr; fd[1] = fdw; return 0; err_fdr: put_unused_fd(fdr); err_read_pipe: fput(files[0]); fput(files[1]); return error; } int do_pipe_flags(int *fd, int flags) { struct file *files[2]; int error = __do_pipe_flags(fd, files, flags); if (!error) { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } return error; } /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way Unix traditionally does this, though. */ static int do_pipe2(int __user *fildes, int flags) { struct file *files[2]; int fd[2]; int error; error = __do_pipe_flags(fd, files, flags); if (!error) { if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) { fput(files[0]); fput(files[1]); put_unused_fd(fd[0]); put_unused_fd(fd[1]); error = -EFAULT; } else { fd_install(fd[0], files[0]); fd_install(fd[1], files[1]); } } return error; } SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags) { return do_pipe2(fildes, flags); } SYSCALL_DEFINE1(pipe, int __user *, fildes) { return do_pipe2(fildes, 0); } static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt) { int cur = *cnt; while (cur == *cnt) { pipe_wait(pipe); if (signal_pending(current)) break; } return cur == *cnt ? 
-ERESTARTSYS : 0; } static void wake_up_partner(struct pipe_inode_info *pipe) { wake_up_interruptible(&pipe->wait); } static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; int ret; filp->f_version = 0; spin_lock(&inode->i_lock); if (inode->i_pipe) { pipe = inode->i_pipe; pipe->files++; spin_unlock(&inode->i_lock); } else { spin_unlock(&inode->i_lock); pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; pipe->files = 1; spin_lock(&inode->i_lock); if (unlikely(inode->i_pipe)) { inode->i_pipe->files++; spin_unlock(&inode->i_lock); free_pipe_info(pipe); pipe = inode->i_pipe; } else { inode->i_pipe = pipe; spin_unlock(&inode->i_lock); } } filp->private_data = pipe; /* OK, we have a pipe and it's pinned down */ __pipe_lock(pipe); /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(pipe); if (!is_pipe && !pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress EPOLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { if (wait_for_partner(pipe, &pipe->w_counter)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. */ ret = -ENXIO; if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(pipe); if (!is_pipe && !pipe->readers) { if (wait_for_partner(pipe, &pipe->r_counter)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. 
* This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(pipe); break; default: ret = -EINVAL; goto err; } /* Ok! */ __pipe_unlock(pipe); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: __pipe_unlock(pipe); put_pipe_info(inode, pipe); return ret; } const struct file_operations pipefifo_fops = { .open = fifo_open, .llseek = no_llseek, .read_iter = pipe_read, .write_iter = pipe_write, .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .release = pipe_release, .fasync = pipe_fasync, }; /* * Currently we rely on the pipe array holding a power-of-2 number * of pages. Returns 0 on error. */ unsigned int round_pipe_size(unsigned long size) { if (size > (1U << 31)) return 0; /* Minimum pipe size, as required by POSIX */ if (size < PAGE_SIZE) return PAGE_SIZE; return roundup_pow_of_two(size); } /* * Allocate a new array of pipe buffers and copy the info over. Returns the * pipe size if successful, or return -ERROR on error. */ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) { struct pipe_buffer *bufs; unsigned int size, nr_pages; unsigned long user_bufs; long ret = 0; size = round_pipe_size(arg); nr_pages = size >> PAGE_SHIFT; if (!nr_pages) return -EINVAL; /* * If trying to increase the pipe capacity, check that an * unprivileged user is not trying to exceed various limits * (soft limit check here, hard limit check just below). * Decreasing the pipe capacity is always permitted, even * if the user is currently over a limit. 
*/ if (nr_pages > pipe->buffers && size > pipe_max_size && !capable(CAP_SYS_RESOURCE)) return -EPERM; user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages); if (nr_pages > pipe->buffers && (too_many_pipe_buffers_hard(user_bufs) || too_many_pipe_buffers_soft(user_bufs)) && is_unprivileged_user()) { ret = -EPERM; goto out_revert_acct; } /* * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't * expect a lot of shrink+grow operations, just free and allocate * again like we would do for growing. If the pipe currently * contains more buffers than arg, then return busy. */ if (nr_pages < pipe->nrbufs) { ret = -EBUSY; goto out_revert_acct; } bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (unlikely(!bufs)) { ret = -ENOMEM; goto out_revert_acct; } /* * The pipe array wraps around, so just start the new one at zero * and adjust the indexes. */ if (pipe->nrbufs) { unsigned int tail; unsigned int head; tail = pipe->curbuf + pipe->nrbufs; if (tail < pipe->buffers) tail = 0; else tail &= (pipe->buffers - 1); head = pipe->nrbufs - tail; if (head) memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer)); if (tail) memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); } pipe->curbuf = 0; kfree(pipe->bufs); pipe->bufs = bufs; pipe->buffers = nr_pages; return nr_pages * PAGE_SIZE; out_revert_acct: (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers); return ret; } /* * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same * location, so checking ->i_pipe is not enough to verify that this is a * pipe. */ struct pipe_inode_info *get_pipe_info(struct file *file) { return file->f_op == &pipefifo_fops ? 
file->private_data : NULL; } long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe; long ret; pipe = get_pipe_info(file); if (!pipe) return -EBADF; __pipe_lock(pipe); switch (cmd) { case F_SETPIPE_SZ: ret = pipe_set_size(pipe, arg); break; case F_GETPIPE_SZ: ret = pipe->buffers * PAGE_SIZE; break; default: ret = -EINVAL; break; } __pipe_unlock(pipe); return ret; } static const struct super_operations pipefs_ops = { .destroy_inode = free_inode_nonrcu, .statfs = simple_statfs, }; /* * pipefs should _never_ be mounted by userland - too much of security hassle, * no real gain from having the whole whorehouse mounted. So we don't need * any operations on the root directory. However, we need a non-trivial * d_name - pipe: will go nicely and kill the special-casing in procfs. */ static struct dentry *pipefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo(fs_type, "pipe:", &pipefs_ops, &pipefs_dentry_operations, PIPEFS_MAGIC); } static struct file_system_type pipe_fs_type = { .name = "pipefs", .mount = pipefs_mount, .kill_sb = kill_anon_super, }; static int __init init_pipe_fs(void) { int err = register_filesystem(&pipe_fs_type); if (!err) { pipe_mnt = kern_mount(&pipe_fs_type); if (IS_ERR(pipe_mnt)) { err = PTR_ERR(pipe_mnt); unregister_filesystem(&pipe_fs_type); } } return err; } fs_initcall(init_pipe_fs);
./CrossVul/dataset_final_sorted/CWE-416/c/good_820_1
crossvul-cpp_data_good_1660_0
/* * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_malloc.h" #include "mif_cod.h" /******************************************************************************\ * Local types. \******************************************************************************/ typedef enum { MIF_END = 0, MIF_CMPT } mif_tagid2_t; typedef enum { MIF_TLX = 0, MIF_TLY, MIF_WIDTH, MIF_HEIGHT, MIF_HSAMP, MIF_VSAMP, MIF_PREC, MIF_SGND, MIF_DATA } mif_tagid_t; /******************************************************************************\ * Local functions. 
\******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts); static void mif_hdr_destroy(mif_hdr_t *hdr); static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts); static mif_hdr_t *mif_hdr_get(jas_stream_t *in); static int mif_process_cmpt(mif_hdr_t *hdr, char *buf); static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out); static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt); static mif_cmpt_t *mif_cmpt_create(void); static void mif_cmpt_destroy(mif_cmpt_t *cmpt); static char *mif_getline(jas_stream_t *jas_stream, char *buf, int bufsize); static int mif_getc(jas_stream_t *in); static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image); /******************************************************************************\ * Local data. \******************************************************************************/ jas_taginfo_t mif_tags2[] = { {MIF_CMPT, "component"}, {MIF_END, "end"}, {-1, 0} }; jas_taginfo_t mif_tags[] = { {MIF_TLX, "tlx"}, {MIF_TLY, "tly"}, {MIF_WIDTH, "width"}, {MIF_HEIGHT, "height"}, {MIF_HSAMP, "sampperx"}, {MIF_VSAMP, "samppery"}, {MIF_PREC, "prec"}, {MIF_SGND, "sgnd"}, {MIF_DATA, "data"}, {-1, 0} }; /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the MIF format. */ jas_image_t *mif_decode(jas_stream_t *in, char *optstr) { mif_hdr_t *hdr; jas_image_t *image; jas_image_t *tmpimage; jas_stream_t *tmpstream; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; /* Avoid warnings about unused parameters. 
*/ optstr = 0; hdr = 0; image = 0; tmpimage = 0; tmpstream = 0; data = 0; if (!(hdr = mif_hdr_get(in))) { goto error; } if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; tmpstream = cmpt->data ? jas_stream_fopen(cmpt->data, "rb") : in; if (!tmpstream) { goto error; } if (!(tmpimage = jas_image_decode(tmpstream, -1, 0))) { goto error; } if (tmpstream != in) { jas_stream_close(tmpstream); tmpstream = 0; } if (!cmpt->width) { cmpt->width = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->height) { cmpt->height = jas_image_cmptwidth(tmpimage, 0); } if (!cmpt->prec) { cmpt->prec = jas_image_cmptprec(tmpimage, 0); } if (cmpt->sgnd < 0) { cmpt->sgnd = jas_image_cmptsgnd(tmpimage, 0); } cmptparm.tlx = cmpt->tlx; cmptparm.tly = cmpt->tly; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = cmpt->sgnd; if (jas_image_addcmpt(image, jas_image_numcmpts(image), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) -= bias; } } } if (jas_image_writecmpt(image, jas_image_numcmpts(image) - 1, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; jas_image_destroy(tmpimage); tmpimage = 0; } mif_hdr_destroy(hdr); hdr = 0; return image; error: if (image) { jas_image_destroy(image); } if (hdr) { mif_hdr_destroy(hdr); } if (tmpstream && tmpstream != in) { jas_stream_close(tmpstream); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return 0; } /******************************************************************************\ 
* Code for save operation. \******************************************************************************/ /* Save an image to a stream in the the MIF format. */ int mif_encode(jas_image_t *image, jas_stream_t *out, char *optstr) { mif_hdr_t *hdr; jas_image_t *tmpimage; int fmt; int cmptno; mif_cmpt_t *cmpt; jas_image_cmptparm_t cmptparm; jas_seq2d_t *data; int_fast32_t x; int_fast32_t y; int bias; hdr = 0; tmpimage = 0; data = 0; if (optstr && *optstr != '\0') { jas_eprintf("warning: ignoring unsupported options\n"); } if ((fmt = jas_image_strtofmt("pnm")) < 0) { jas_eprintf("error: PNM support required\n"); goto error; } if (!(hdr = mif_makehdrfromimage(image))) { goto error; } if (mif_hdr_put(hdr, out)) { goto error; } /* Output component data. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; if (!cmpt->data) { if (!(tmpimage = jas_image_create0())) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = cmpt->sampperx; cmptparm.vstep = cmpt->samppery; cmptparm.width = cmpt->width; cmptparm.height = cmpt->height; cmptparm.prec = cmpt->prec; cmptparm.sgnd = false; if (jas_image_addcmpt(tmpimage, jas_image_numcmpts(tmpimage), &cmptparm)) { goto error; } if (!(data = jas_seq2d_create(0, 0, cmpt->width, cmpt->height))) { goto error; } if (jas_image_readcmpt(image, cmptno, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } if (cmpt->sgnd) { bias = 1 << (cmpt->prec - 1); for (y = 0; y < cmpt->height; ++y) { for (x = 0; x < cmpt->width; ++x) { *jas_seq2d_getref(data, x, y) += bias; } } } if (jas_image_writecmpt(tmpimage, 0, 0, 0, cmpt->width, cmpt->height, data)) { goto error; } jas_seq2d_destroy(data); data = 0; if (jas_image_encode(tmpimage, out, fmt, 0)) { goto error; } jas_image_destroy(tmpimage); tmpimage = 0; } } mif_hdr_destroy(hdr); return 0; error: if (hdr) { mif_hdr_destroy(hdr); } if (tmpimage) { jas_image_destroy(tmpimage); } if (data) { jas_seq2d_destroy(data); } return -1; } 
/******************************************************************************\ * Code for validate operation. \******************************************************************************/ int mif_validate(jas_stream_t *in) { uchar buf[MIF_MAGICLEN]; uint_fast32_t magic; int i; int n; assert(JAS_STREAM_MAXPUTBACK >= MIF_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, MIF_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Was enough data read? */ if (n < MIF_MAGICLEN) { return -1; } /* Compute the signature value. */ magic = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; /* Ensure that the signature is correct for this format. */ if (magic != MIF_MAGIC) { return -1; } return 0; } /******************************************************************************\ * Code for MIF header class. \******************************************************************************/ static mif_hdr_t *mif_hdr_create(int maxcmpts) { mif_hdr_t *hdr; if (!(hdr = jas_malloc(sizeof(mif_hdr_t)))) { return 0; } hdr->numcmpts = 0; hdr->maxcmpts = 0; hdr->cmpts = 0; if (mif_hdr_growcmpts(hdr, maxcmpts)) { mif_hdr_destroy(hdr); return 0; } return hdr; } static void mif_hdr_destroy(mif_hdr_t *hdr) { int cmptno; if (hdr->cmpts) { for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { mif_cmpt_destroy(hdr->cmpts[cmptno]); } jas_free(hdr->cmpts); } jas_free(hdr); } static int mif_hdr_growcmpts(mif_hdr_t *hdr, int maxcmpts) { int cmptno; mif_cmpt_t **newcmpts; assert(maxcmpts >= hdr->numcmpts); newcmpts = (!hdr->cmpts) ? 
jas_malloc(maxcmpts * sizeof(mif_cmpt_t *)) : jas_realloc(hdr->cmpts, maxcmpts * sizeof(mif_cmpt_t *)); if (!newcmpts) { return -1; } hdr->maxcmpts = maxcmpts; hdr->cmpts = newcmpts; for (cmptno = hdr->numcmpts; cmptno < hdr->maxcmpts; ++cmptno) { hdr->cmpts[cmptno] = 0; } return 0; } static mif_hdr_t *mif_hdr_get(jas_stream_t *in) { uchar magicbuf[MIF_MAGICLEN]; char buf[4096]; mif_hdr_t *hdr; bool done; jas_tvparser_t *tvp; int id; hdr = 0; if (jas_stream_read(in, magicbuf, MIF_MAGICLEN) != MIF_MAGICLEN) { goto error; } if (magicbuf[0] != (MIF_MAGIC >> 24) || magicbuf[1] != ((MIF_MAGIC >> 16) & 0xff) || magicbuf[2] != ((MIF_MAGIC >> 8) & 0xff) || magicbuf[3] != (MIF_MAGIC & 0xff)) { jas_eprintf("error: bad signature\n"); goto error; } if (!(hdr = mif_hdr_create(0))) { goto error; } done = false; do { if (!mif_getline(in, buf, sizeof(buf))) { goto error; } if (buf[0] == '\0') { continue; } if (!(tvp = jas_tvparser_create(buf))) { goto error; } if (jas_tvparser_next(tvp)) { abort(); } id = jas_taginfo_nonull(jas_taginfos_lookup(mif_tags2, jas_tvparser_gettag(tvp)))->id; jas_tvparser_destroy(tvp); switch (id) { case MIF_CMPT: mif_process_cmpt(hdr, buf); break; case MIF_END: done = 1; break; } } while (!done); return hdr; error: if (hdr) { mif_hdr_destroy(hdr); } return 0; } static int mif_process_cmpt(mif_hdr_t *hdr, char *buf) { jas_tvparser_t *tvp; mif_cmpt_t *cmpt; int id; cmpt = 0; tvp = 0; if (!(cmpt = mif_cmpt_create())) { goto error; } cmpt->tlx = 0; cmpt->tly = 0; cmpt->sampperx = 0; cmpt->samppery = 0; cmpt->width = 0; cmpt->height = 0; cmpt->prec = 0; cmpt->sgnd = -1; cmpt->data = 0; if (!(tvp = jas_tvparser_create(buf))) { goto error; } while (!(id = jas_tvparser_next(tvp))) { switch (jas_taginfo_nonull(jas_taginfos_lookup(mif_tags, jas_tvparser_gettag(tvp)))->id) { case MIF_TLX: cmpt->tlx = atoi(jas_tvparser_getval(tvp)); break; case MIF_TLY: cmpt->tly = atoi(jas_tvparser_getval(tvp)); break; case MIF_WIDTH: cmpt->width = atoi(jas_tvparser_getval(tvp)); 
break; case MIF_HEIGHT: cmpt->height = atoi(jas_tvparser_getval(tvp)); break; case MIF_HSAMP: cmpt->sampperx = atoi(jas_tvparser_getval(tvp)); break; case MIF_VSAMP: cmpt->samppery = atoi(jas_tvparser_getval(tvp)); break; case MIF_PREC: cmpt->prec = atoi(jas_tvparser_getval(tvp)); break; case MIF_SGND: cmpt->sgnd = atoi(jas_tvparser_getval(tvp)); break; case MIF_DATA: if (!(cmpt->data = jas_strdup(jas_tvparser_getval(tvp)))) { return -1; } break; } } if (!cmpt->sampperx || !cmpt->samppery) { goto error; } if (mif_hdr_addcmpt(hdr, hdr->numcmpts, cmpt)) { goto error; } jas_tvparser_destroy(tvp); return 0; error: if (cmpt) { mif_cmpt_destroy(cmpt); } if (tvp) { jas_tvparser_destroy(tvp); } return -1; } static int mif_hdr_put(mif_hdr_t *hdr, jas_stream_t *out) { int cmptno; mif_cmpt_t *cmpt; /* Output signature. */ jas_stream_putc(out, (MIF_MAGIC >> 24) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 16) & 0xff); jas_stream_putc(out, (MIF_MAGIC >> 8) & 0xff); jas_stream_putc(out, MIF_MAGIC & 0xff); /* Output component information. */ for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { cmpt = hdr->cmpts[cmptno]; jas_stream_printf(out, "component tlx=%ld tly=%ld " "sampperx=%ld samppery=%ld width=%ld height=%ld prec=%d sgnd=%d", cmpt->tlx, cmpt->tly, cmpt->sampperx, cmpt->samppery, cmpt->width, cmpt->height, cmpt->prec, cmpt->sgnd); if (cmpt->data) { jas_stream_printf(out, " data=%s", cmpt->data); } jas_stream_printf(out, "\n"); } /* Output end of header indicator. */ jas_stream_printf(out, "end\n"); return 0; } static int mif_hdr_addcmpt(mif_hdr_t *hdr, int cmptno, mif_cmpt_t *cmpt) { assert(cmptno >= hdr->numcmpts); if (hdr->numcmpts >= hdr->maxcmpts) { if (mif_hdr_growcmpts(hdr, hdr->numcmpts + 128)) { return -1; } } hdr->cmpts[hdr->numcmpts] = cmpt; ++hdr->numcmpts; return 0; } /******************************************************************************\ * Code for MIF component class. 
\******************************************************************************/ static mif_cmpt_t *mif_cmpt_create() { mif_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(mif_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(mif_cmpt_t)); return cmpt; } static void mif_cmpt_destroy(mif_cmpt_t *cmpt) { if (cmpt->data) { jas_free(cmpt->data); } jas_free(cmpt); } /******************************************************************************\ * MIF parsing code. \******************************************************************************/ static char *mif_getline(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = mif_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; if (!(bufptr = strchr(buf, '\n'))) { return 0; } *bufptr = '\0'; return buf; } static int mif_getc(jas_stream_t *in) { int c; bool done; done = false; do { switch (c = jas_stream_getc(in)) { case EOF: done = 1; break; case '#': for (;;) { if ((c = jas_stream_getc(in)) == EOF) { done = 1; break; } if (c == '\n') { break; } } break; case '\\': if (jas_stream_peekc(in) == '\n') { jas_stream_getc(in); } break; default: done = 1; break; } } while (!done); return c; } /******************************************************************************\ * Miscellaneous functions. 
\******************************************************************************/ static mif_hdr_t *mif_makehdrfromimage(jas_image_t *image) { mif_hdr_t *hdr; int cmptno; mif_cmpt_t *cmpt; if (!(hdr = mif_hdr_create(jas_image_numcmpts(image)))) { return 0; } hdr->magic = MIF_MAGIC; hdr->numcmpts = jas_image_numcmpts(image); for (cmptno = 0; cmptno < hdr->numcmpts; ++cmptno) { hdr->cmpts[cmptno] = jas_malloc(sizeof(mif_cmpt_t)); cmpt = hdr->cmpts[cmptno]; cmpt->tlx = jas_image_cmpttlx(image, cmptno); cmpt->tly = jas_image_cmpttly(image, cmptno); cmpt->width = jas_image_cmptwidth(image, cmptno); cmpt->height = jas_image_cmptheight(image, cmptno); cmpt->sampperx = jas_image_cmpthstep(image, cmptno); cmpt->samppery = jas_image_cmptvstep(image, cmptno); cmpt->prec = jas_image_cmptprec(image, cmptno); cmpt->sgnd = jas_image_cmptsgnd(image, cmptno); cmpt->data = 0; } return hdr; }
./CrossVul/dataset_final_sorted/CWE-416/c/good_1660_0
crossvul-cpp_data_bad_545_0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
/* Root of the block layer's debugfs hierarchy. */
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

/* Allocator for unique request_queue ids (q->id). */
DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

/**
 * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was set.
 */
bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(q->queue_lock, flags);
	res = queue_flag_test_and_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return res;
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);

/* Clear the writeback congestion state for @rl in direction @sync. */
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	clear_wb_congested(rl->blkg->wb_congested, sync);
#else
	/*
	 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

/* Set the writeback congestion state for @rl in direction @sync. */
static void blk_set_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
	set_wb_congested(rl->blkg->wb_congested, sync);
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

/*
 * Recompute the congestion on/off thresholds from q->nr_requests.  The
 * "on" threshold is just below the request limit; "off" is lower still
 * so that congestion state has hysteresis.
 */
void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/*
 * Zero @rq and initialize the fields that have non-zero initial values
 * (list heads, -1 sentinels for cpu/tag/sector, allocation timestamp).
 */
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

/*
 * Translation table between blk_status_t values, Unix errnos and
 * human-readable names.  Indexed by the blk_status_t value itself.
 */
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

/* Map a Unix errno to a blk_status_t; unknown errnos become BLK_STS_IOERR. */
blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

/* Map a blk_status_t back to a Unix errno; out-of-range values warn and give -EIO. */
int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

/* Rate-limited error report for a failed request (name, disk, sector). */
static void print_req_error(struct request *req, blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
			   __func__, blk_errors[idx].name, req->rq_disk ?
			   req->rq_disk->disk_name : "?",
			   (unsigned long long)blk_rq_pos(req));
}

/*
 * Complete @nbytes of @bio on behalf of @rq: record the error (if any),
 * propagate the QUIET flag, advance the bio and end it once fully consumed.
 */
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

/* Dump a request's flags, position and bio chain to the kernel log. */
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/* Work item body for blk_delay_queue(): run the queue under its lock. */
static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

/**
 * blk_start_queue_async - asynchronously restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue_async() will clear the stop flag on the queue, and
 *   ensure that the request_fn for the queue is run from an async
 *   context.
 **/
void blk_start_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue().
**/ void blk_start_queue(struct request_queue *q) { lockdep_assert_held(q->queue_lock); WARN_ON_ONCE(q->mq_ops); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); } EXPORT_SYMBOL(blk_start_queue); /** * blk_stop_queue - stop a queue * @q: The &struct request_queue in question * * Description: * The Linux block layer assumes that a block driver will consume all * entries on the request queue when the request_fn strategy is called. * Often this will not happen, because of hardware limitations (queue * depth settings). If a device driver gets a 'queue full' response, * or if it simply chooses not to queue more I/O at one point, it can * call this function to prevent the request_fn from being called until * the driver has signalled it's ready to go again. This happens by calling * blk_start_queue() to restart queue operations. **/ void blk_stop_queue(struct request_queue *q) { lockdep_assert_held(q->queue_lock); WARN_ON_ONCE(q->mq_ops); cancel_delayed_work(&q->delay_work); queue_flag_set(QUEUE_FLAG_STOPPED, q); } EXPORT_SYMBOL(blk_stop_queue); /** * blk_sync_queue - cancel any pending callbacks on a queue * @q: the queue * * Description: * The block layer may perform asynchronous callback activity * on a queue, such as calling the unplug function after a timeout. * A block device may call blk_sync_queue to ensure that any * such activity is cancelled, thus allowing it to release resources * that the callbacks might use. The caller must already have made sure * that its ->make_request_fn will not re-add plugging prior to calling * this function. * * This function does not cancel any asynchronous activity arising * out of elevator or throttling code. That would require elevator_exit() * and blkcg_exit_queue() to be called with queue lock initialized. 
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		/* blk-mq: stop requeue work and per-hctx run work */
		cancel_delayed_work_sync(&q->requeue_work);
		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	} else {
		cancel_delayed_work_sync(&q->delay_work);
	}
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
 * set and 1 if the flag was already set.
 */
int blk_set_preempt_only(struct request_queue *q)
{
	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);

/**
 * blk_clear_preempt_only - clear QUEUE_FLAG_PREEMPT_ONLY
 * @q: request queue pointer
 *
 * Clears the flag and wakes up any task blocked in blk_queue_enter()
 * waiting for the preempt-only state to end.
 */
void blk_clear_preempt_only(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);

/**
 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
 * @q:	The queue to run
 *
 * Description:
 *    Invoke request handling on a queue if there are any pending requests.
 *    May be used to restart request handling after a request has completed.
 *    This variant runs the queue whether or not the queue has been
 *    stopped. Must be called with the queue lock held and interrupts
 *    disabled. See also @blk_run_queue.
 */
inline void __blk_run_queue_uncond(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}
EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue.
 */
void __blk_run_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
 *    of us.
 *
 * Note:
 *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
 *    has canceled q->delay_work, callers must hold the queue lock to avoid
 *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
 */
void blk_run_queue_async(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

/**
 * blk_put_queue - drop a reference on a request queue
 * @q: the queue
 *
 * Releases the kobject reference taken by blk_get_queue(); the queue is
 * freed via the kobject release path once the last reference is gone.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * __blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.
 The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	int i;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while (true) {
		bool drain = false;

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or is not yet fully
		 * active yet.  Some drivers (e.g. fd and loop) get unhappy
		 * in such cases.  Kick queue iff dispatch queue has
		 * something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;
		drain |= q->request_fn_active;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				if (fq)
					drain |= !list_empty(&fq->flush_queue[i]);
			}
		}

		if (!drain)
			break;

		/* drop the lock while sleeping so forward progress is possible */
		spin_unlock_irq(q->queue_lock);

		msleep(10);

		spin_lock_irq(q->queue_lock);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);
	}
}

/* Drain all requests from @q, taking and releasing the queue lock. */
void blk_drain_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_drain_queue(q, true);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	spin_lock_irq(q->queue_lock);
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Queues start drained.  Skip actual draining till init is
	 * complete.  This avoids lengthy delays during queue init which
	 * can happen many times during boot.
	 */
	if (blk_queue_init_done(q)) {
		spin_lock_irq(q->queue_lock);
		__blk_drain_queue(q, false);
		spin_unlock_irq(q->queue_lock);

		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 *
 * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
 * this function is called for both blk-sq and blk-mq queues.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

/**
 * blk_set_queue_dying - mark a queue as dying
 * @q: queue of interest
 *
 * Sets QUEUE_FLAG_DYING, starts a queue freeze to block new requests from
 * entering, and wakes up everything waiting for a request or for the
 * freeze wait queue so they can observe the DYING state.
 */
void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
	else {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);
		blk_queue_for_each_rl(rl, q) {
			if (rl->rq_pool) {
				wake_up_all(&rl->wait[BLK_RW_SYNC]);
				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
			}
		}
		spin_unlock_irq(q->queue_lock);
	}

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	blk_set_queue_dying(q);
	spin_lock_irq(lock);

	/*
	 * A dying queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that q->request_fn() gets invoked after draining finished.
	 */
	blk_freeze_queue(q);
	spin_lock_irq(lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/*
	 * make sure all in-progress dispatch are completed because
	 * blk_freeze_queue() can only complete all requests, and
	 * dispatch may still be in-progress since we dispatch requests
	 * from more than one contexts.
	 *
	 * No need to quiesce queue if it isn't initialized yet since
	 * blk_freeze_queue() should be enough for cases of passthrough
	 * request.
	 */
	if (q->mq_ops && blk_queue_init_done(q))
		blk_mq_quiesce_queue(q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	/*
	 * I/O scheduler exit is only safe after the sysfs scheduler attribute
	 * has been removed.
	 */
	WARN_ON_ONCE(q->kobj.state_in_sysfs);

	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);

	if (q->mq_ops)
		blk_mq_free_queue(q);
	percpu_ref_exit(&q->q_usage_counter);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/* Allocate memory local to the request queue */
static void *alloc_request_simple(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;

	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
}

/* mempool free callback matching alloc_request_simple() */
static void free_request_simple(void *element, void *data)
{
	kmem_cache_free(request_cachep, element);
}

/*
 * Allocate a request plus q->cmd_size bytes of driver payload, letting the
 * driver's init_rq_fn initialize the payload.  Returns NULL on failure.
 */
static void *alloc_request_size(gfp_t gfp_mask, void *data)
{
	struct request_queue *q = data;
	struct request *rq;

	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
			q->node);
	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
		kfree(rq);
		rq = NULL;
	}
	return rq;
}

/* mempool free callback matching alloc_request_size() */
static void free_request_size(void *element, void *data)
{
	struct request_queue *q = data;

	if (q->exit_rq_fn)
		q->exit_rq_fn(q, element);
	kfree(element);
}

/*
 * Initialize a legacy request_list: counters, wait queues and the backing
 * request mempool.  No-op for blk-mq queues or an already-initialized list.
 * Returns 0 on success or -ENOMEM if the mempool could not be created.
 */
int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool) || q->mq_ops)
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	if (q->cmd_size) {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_size, free_request_size,
				q, gfp_mask, q->node);
	} else {
		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
				alloc_request_simple, free_request_simple,
				q, gfp_mask, q->node);
	}
	if (!rl->rq_pool)
		return -ENOMEM;

	/* non-root request lists pin the queue while they exist */
	if (rl != &q->root_rl)
		WARN_ON_ONCE(!blk_get_queue(q));

	return 0;
}

/* Tear down a request_list created by blk_init_rl(). */
void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
	if (rl->rq_pool) {
		mempool_destroy(rl->rq_pool);
		if (rl != &q->root_rl)
			blk_put_queue(q);
	}
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that sets the PREEMPT_ONLY flag is
			 * responsible for ensuring that that flag is globally
			 * visible before the queue is unfrozen.
			 */
			if (preempt || !blk_queue_preempt_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (atomic_read(&q->mq_freeze_depth) == 0 &&
			    (preempt || !blk_queue_preempt_only(q))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

/* Drop the reference taken by a successful blk_queue_enter(). */
void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

/*
 * percpu_ref release callback for q->q_usage_counter: wake anyone waiting
 * for the queue to become fully frozen.
 */
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

/* q->timeout timer handler: defer timeout processing to kblockd. */
static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
 *        serialize calls to the legacy .request_fn() callback.
 *	  Ignored for
 *        blk-mq request queues.
 *
 * Note: pass the queue lock as the third argument to this function instead of
 * setting the queue lock pointer explicitly to avoid triggering a sporadic
 * crash in the blkcg code. This function namely calls blkcg_init_queue() and
 * the queue lock pointer must be set before blkcg_init_queue() is called.
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
					   spinlock_t *lock)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/* legacy queues use the caller's lock if one was supplied */
	if (!q->mq_ops)
		q->queue_lock = lock ? : &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished when the queue is
	 * registered by blk_register_queue().
	 */
	q->bypass_depth = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);

	init_waitqueue_head(&q->mq_freeze_wq);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

	/* error unwinding: release in reverse order of acquisition */
fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

/*
 * NUMA-aware variant of blk_init_queue(): allocate the queue on @node_id,
 * set the driver's request_fn and finish legacy queue initialization.
 * Returns NULL on failure (the partially set up queue is cleaned up).
 */
struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
	if (!q)
		return NULL;

	q->request_fn = rfn;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * Finish initialization of a legacy (non-mq) queue allocated with
 * blk_alloc_queue_node(): flush machinery, root request list, make_request
 * handler and the default elevator.  Returns 0 or -ENOMEM.
 */
int blk_init_allocated_queue(struct request_queue *q)
{
	WARN_ON_ONCE(q->mq_ops);

	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
	if (!q->fq)
		return -ENOMEM;

	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
		goto out_free_flush_queue;

	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
		goto out_exit_flush_rq;

	INIT_WORK(&q->timeout_work, blk_timeout_work);
	q->queue_flags		|= QUEUE_FLAG_DEFAULT;

	/*
	 * This also sets hw/phys segments, boundary and size
	 */
	blk_queue_make_request(q, blk_queue_bio);

	q->sg_reserved_size = INT_MAX;

	if (elevator_init(q))
		goto out_exit_flush_rq;
	return 0;

out_exit_flush_rq:
	if (q->exit_rq_fn)
		q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

/*
 * Take a reference on @q unless it is already dying.  Returns true if a
 * reference was taken; pair with blk_put_queue().
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/*
 * Return @rq to its request_list's mempool, releasing elevator private
 * data and the io_context reference held for ELVPRIV requests.
 */
static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
	if (rq->rq_flags & RQF_ELVPRIV) {
		elv_put_request(rl->q, rq);
		if (rq->elv.icq)
			put_io_context(rq->elv.icq->ioc);
	}

	mempool_free(rq, rl->rq_pool);
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

/*
 * Helper for freed_request(): clear congestion below the off-threshold and
 * wake one waiter / clear the "full" flag once below the request limit.
 */
static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_congested(rl, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
*/ static void freed_request(struct request_list *rl, bool sync, req_flags_t rq_flags) { struct request_queue *q = rl->q; q->nr_rqs[sync]--; rl->count[sync]--; if (rq_flags & RQF_ELVPRIV) q->nr_rqs_elvpriv--; __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) __freed_request(rl, sync ^ 1); } int blk_update_nr_requests(struct request_queue *q, unsigned int nr) { struct request_list *rl; int on_thresh, off_thresh; WARN_ON_ONCE(q->mq_ops); spin_lock_irq(q->queue_lock); q->nr_requests = nr; blk_queue_congestion_threshold(q); on_thresh = queue_congestion_on_threshold(q); off_thresh = queue_congestion_off_threshold(q); blk_queue_for_each_rl(rl, q) { if (rl->count[BLK_RW_SYNC] >= on_thresh) blk_set_congested(rl, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < off_thresh) blk_clear_congested(rl, BLK_RW_SYNC); if (rl->count[BLK_RW_ASYNC] >= on_thresh) blk_set_congested(rl, BLK_RW_ASYNC); else if (rl->count[BLK_RW_ASYNC] < off_thresh) blk_clear_congested(rl, BLK_RW_ASYNC); if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { blk_set_rl_full(rl, BLK_RW_SYNC); } else { blk_clear_rl_full(rl, BLK_RW_SYNC); wake_up(&rl->wait[BLK_RW_SYNC]); } if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { blk_set_rl_full(rl, BLK_RW_ASYNC); } else { blk_clear_rl_full(rl, BLK_RW_ASYNC); wake_up(&rl->wait[BLK_RW_ASYNC]); } } spin_unlock_irq(q->queue_lock); return 0; } /** * __get_request - get a free request * @rl: request list to allocate from * @op: operation and flags * @bio: bio to allocate request for (can be %NULL) * @flags: BLQ_MQ_REQ_* flags * @gfp_mask: allocator flags * * Get a free request from @q. This function may fail under memory * pressure or if @q is dead. * * Must be called with @q->queue_lock held and, * Returns ERR_PTR on failure, with @q->queue_lock held. * Returns request pointer on success, with @q->queue_lock *not held*. 
 */
static struct request *__get_request(struct request_list *rl, unsigned int op,
				     struct bio *bio, blk_mq_req_flags_t flags,
				     gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = op_is_sync(op);
	int may_queue;
	req_flags_t rq_flags = RQF_ALLOCED;

	lockdep_assert_held(q->queue_lock);

	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);

	may_queue = elv_may_queue(q, op);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return ERR_PTR(-ENOMEM);
				}
			}
		}
		blk_set_congested(rl, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return ERR_PTR(-ENOMEM);

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
		rq_flags |= RQF_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rq_flags |= RQF_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = op;
	rq->rq_flags = rq_flags;
	if (flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;

	/* init elvpriv */
	if (rq_flags & RQF_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, op);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->rq_flags &= ~RQF_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, is_sync, rq_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction was pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;

	return ERR_PTR(-ENOMEM);
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @op: operation and flags
 * @bio: bio to allocate request for (can be %NULL)
 * @flags: BLK_MQ_REQ_* flags.
 * @gfp: allocator flags
 *
 * Get a free request from @q.  If %BLK_MQ_REQ_NOWAIT is set in @flags,
 * this function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns ERR_PTR on failure, with @q->queue_lock held.
 * Returns request pointer on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, unsigned int op,
		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
{
	const bool is_sync = op_is_sync(op);
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, op, bio, flags, gfp);
	if (!IS_ERR(rq))
		return rq;

	if (op & REQ_NOWAIT) {
		blk_put_rl(rl);
		return ERR_PTR(-EAGAIN);
	}

	if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return rq;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, op);

	/* drop the lock while sleeping; reacquired before retrying */
	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
static struct request *blk_old_get_request(struct request_queue *q,
				unsigned int op, blk_mq_req_flags_t flags)
{
	struct request *rq;
	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
	int ret = 0;

	WARN_ON_ONCE(q->mq_ops);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irq(q->queue_lock);
	rq = get_request(q, op, NULL, flags, gfp_mask);
	if (IS_ERR(rq)) {
		/* get_request() returns errors with queue_lock still held */
		spin_unlock_irq(q->queue_lock);
		blk_queue_exit(q);
		return rq;
	}

	/* q->queue_lock is unlocked at this point */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	if (q->mq_ops) {
		req = blk_mq_alloc_request(q, op, flags);
		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
			q->mq_ops->initialize_rq_fn(req);
	} else {
		req = blk_old_get_request(q, op, flags);
		if (!IS_ERR(req) && q->initialize_rq_fn)
			q->initialize_rq_fn(req);
	}

	return req;
}
EXPORT_SYMBOL(blk_get_request);

/**
 * blk_requeue_request - put a request back on queue
 * @q: request queue where request should be inserted
 * @rq: request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more, when that condition happens we need to put the request back
 *    on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);
	rq_qos_requeue(q, rq);

	if (rq->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

/* Account a new request and hand it to the elevator at @where. */
static void add_acct_request(struct request_queue *q, struct request *rq,
			     int where)
{
	blk_account_io_start(rq, true);
	__elv_add_request(q, rq, where);
}

/* Fold queue-time since part->stamp into one partition's stats. */
static void part_round_stats_single(struct request_queue *q, int cpu,
				    struct hd_struct *part, unsigned long now,
				    unsigned int inflight)
{
	if (inflight) {
		__part_stat_add(cpu, part, time_in_queue,
				inflight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

/**
 * part_round_stats() - Round off the performance stats on a struct disk_stats.
 * @q: target block queue
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
	struct hd_struct *part2 = NULL;
	unsigned long now = jiffies;
	unsigned int inflight[2];
	int stats = 0;

	if (part->stamp != now)
		stats |= 1;

	if (part->partno) {
		/* a partition's IO is also accounted against the whole disk */
		part2 = &part_to_disk(part)->part0;
		if (part2->stamp != now)
			stats |= 2;
	}

	if (!stats)
		return;

	part_in_flight(q, part, inflight);

	if (stats & 2)
		part_round_stats_single(q, cpu, part2, now, inflight[1]);
	if (stats & 1)
		part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);

#ifdef CONFIG_PM
/* Mark the queue's device busy-time for runtime PM when it goes idle. */
static void blk_pm_put_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
		pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline void blk_pm_put_request(struct request *rq) {}
#endif

void __blk_put_request(struct request_queue *q, struct request *req)
{
	req_flags_t rq_flags = req->rq_flags;

	if (unlikely(!q))
		return;

	if (q->mq_ops) {
		blk_mq_free_request(req);
		return;
	}

	lockdep_assert_held(q->queue_lock);

	blk_req_zone_write_unlock(req);
	blk_pm_put_request(req);

	elv_completed_request(q, req);

	/* this is a bio leak */
	WARN_ON(req->bio != NULL);

	rq_qos_done(q, req);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rq_flags & RQF_ALLOCED) {
		struct request_list *rl = blk_rq_rl(req);
		bool sync = op_is_sync(req->cmd_flags);

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));

		blk_free_request(rl, req);
		freed_request(rl, sync, rq_flags);
		blk_put_rl(rl);
		blk_queue_exit(q);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

/* Locked wrapper around __blk_put_request() for the legacy path. */
void blk_put_request(struct request *req)
{
	struct request_queue *q = req->q;

	if (q->mq_ops)
		blk_mq_free_request(req);
	else {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_put_request);

/* Append @bio to the tail of @req if the merge limits allow it. */
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;

	trace_block_bio_backmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

/* Prepend @bio to the head of @req if the merge limits allow it. */
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;

	trace_block_bio_frontmerge(q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	blk_account_io_start(req, false);
	return true;
}

/* Append a discard @bio to @req as an extra discard segment. */
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @request_count: out parameter for number of traversed plugged requests
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = current->plug;
	if (!plug)
		return false;
	*request_count = 0;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 **/
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

/* Count requests for @q currently parked on %current's plug list. */
unsigned int blk_plug_queued_count(struct request_queue *q)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;
	unsigned int ret = 0;

	plug = current->plug;
	if (!plug)
		goto out;

	if (q->mq_ops)
		plug_list = &plug->mq_list;
	else
		plug_list = &plug->list;

	list_for_each_entry(rq, plug_list, queuelist) {
		if (rq->q == q)
			ret++;
	}
out:
	return ret;
}

/* Seed a freshly allocated request from the fields of @bio. */
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
	struct io_context *ioc = rq_ioc(bio);

	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->__sector = bio->bi_iter.bi_sector;
	if (ioprio_valid(bio_prio(bio)))
		req->ioprio = bio_prio(bio);
	else if (ioc)
		req->ioprio = ioc->ioprio;
	else
		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	req->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);

/* Legacy (non-mq) make_request_fn: merge or allocate+queue @bio on @q. */
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct blk_plug *plug;
	int where = ELEVATOR_INSERT_SORT;
	struct request *req, *free;
	unsigned int request_count = 0;

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	blk_queue_split(q, &bio);

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	if (op_is_flush(bio->bi_opf)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
	}

	/*
	 * Check if we can merge with the plugged list before grabbing
	 * any locks.
	 */
	if (!blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);

	spin_lock_irq(q->queue_lock);

	switch (elv_merge(q, &req, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!bio_attempt_back_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_back_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
		goto out_unlock;
	case ELEVATOR_FRONT_MERGE:
		if (!bio_attempt_front_merge(q, req, bio))
			break;
		elv_bio_merged(q, req, bio);
		free = attempt_front_merge(q, req);
		if (free)
			__blk_put_request(q, free);
		else
			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
		goto out_unlock;
	default:
		break;
	}

get_rq:
	rq_qos_throttle(q, bio, q->queue_lock);

	/*
	 * Grab a free request. This is might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	blk_queue_enter_live(q);
	req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
	if (IS_ERR(req)) {
		blk_queue_exit(q);
		rq_qos_cleanup(q, bio);
		if (PTR_ERR(req) == -ENOMEM)
			bio->bi_status = BLK_STS_RESOURCE;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		goto out_unlock;
	}

	rq_qos_track(q, req, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	blk_init_request_from_bio(req, bio);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	plug = current->plug;
	if (plug) {
		/*
		 * If this is the first request added after a plug, fire
		 * of a plug trace.
		 *
		 * @request_count may become stale because of schedule
		 * out, so check plug list again.
		 */
		if (!request_count || list_empty(&plug->list))
			trace_block_plug(q);
		else {
			struct request *last = list_entry_rq(plug->list.prev);
			if (request_count >= BLK_MAX_REQUEST_COUNT ||
			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
		}
		list_add_tail(&req->queuelist, &plug->list);
		blk_account_io_start(req, true);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		__blk_run_queue(q);
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}

	return BLK_QC_T_NONE;
}

/* Log a bio that addresses sectors beyond @maxsector. */
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/* Reject writes to a read-only (part or whole) device; logs and returns true. */
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	if (part->policy && op_is_write(bio_op(bio))) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		return true;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	/*
	 * Zone reset does not include bi_size so bio_sectors() is always 0.
	 * Include a test for the reset op code and perform the remap if needed.
	 */
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* Validate @bio against @q's capabilities; may end the bio with an error. */
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_REPORT:
	case REQ_OP_ZONE_RESET:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context and lazy ioc
	 * allocation ends up trading a lot of pain for a small amount of
	 * memory.  Just allocate it upfront.  This may fail and block
	 * layer knows how to live with it.
	 */
	create_io_context(GFP_ATOMIC, q->node);

	if (!blkcg_bio_issue_check(q, bio))
		return false;

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) else where.
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_dev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may resubmit the bio to
 * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_mq_req_flags_t flags = 0;
	struct request_queue *q = bio->bi_disk->queue;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (bio->bi_opf & REQ_NOWAIT)
		flags = BLK_MQ_REQ_NOWAIT;
	if (bio_flagged(bio, BIO_QUEUE_ENTERED))
		blk_queue_enter_live(q);
	else if (blk_queue_enter(q, flags) < 0) {
		if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
		return ret;
	}

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submited by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * of the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		bool enter_succeeded = true;

		if (unlikely(q != bio->bi_disk->queue)) {
			if (q)
				blk_queue_exit(q);
			q = bio->bi_disk->queue;
			flags = 0;
			if (bio->bi_opf & REQ_NOWAIT)
				flags = BLK_MQ_REQ_NOWAIT;
			if (blk_queue_enter(q, flags) < 0) {
				enter_succeeded = false;
				q = NULL;
			}
		}

		if (enter_succeeded) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = q->make_request_fn(q, bio);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		} else {
			if (unlikely(!blk_queue_dying(q) &&
					(bio->bi_opf & REQ_NOWAIT)))
				bio_wouldblock_error(bio);
			else
				bio_io_error(bio);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	if (q)
		blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL(generic_make_request);

/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This function behaves like generic_make_request(), but does not protect
 * against recursion.  Must only be used if the called driver is known
 * to not call generic_make_request (or direct_make_request) again from
 * its make_request function.  (Calling direct_make_request again from
 * a workqueue is perfectly fine as that doesn't recurse).
 */
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	blk_qc_t ret;

	if (!generic_make_request_checks(bio))
		return BLK_QC_T_NONE;

	if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
		if (nowait && !blk_queue_dying(q))
			bio->bi_status = BLK_STS_AGAIN;
		else
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	ret = q->make_request_fn(q, bio);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(direct_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be presetup and ready for I/O.
 *
 */
blk_qc_t submit_bio(struct bio *bio)
{
	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

/* Poll @q for completion of the request identified by @cookie. */
bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
	if (!q->poll_fn || !blk_qc_t_valid(cookie))
		return false;

	/* flush plugged IO first so the polled request is actually issued */
	if (current->plug)
		blk_flush_plug_list(current->plug, false);
	return q->poll_fn(q, cookie);
}
EXPORT_SYMBOL_GPL(blk_poll);

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for new the queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (q->mq_ops) {
		if (blk_queue_io_stat(q))
			blk_account_io_start(rq, true);
		/*
		 * Since we have a scheduler attached on the top device,
		 * bypass a potential scheduler on the bottom device for
		 * insert.
		 */
		return blk_mq_request_issue_directly(rq);
	}

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dying(q))) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return BLK_STS_IOERR;
	}

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	if (op_is_flush(rq->cmd_flags))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	/* Non-mixed-merge requests fail as a whole. */
	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

/* Account @bytes of completed transfer against @req's partition stats. */
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

/* Final per-partition accounting for @req at completion time @now (ns). */
void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
		unsigned long duration;
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;
		int cpu;

		duration = nsecs_to_jiffies(now - req->start_time_ns);
		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[sgrp]);
		part_stat_add(cpu, part, ticks[sgrp], duration);
		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		/* drops the reference taken in blk_account_io_start() */
		hd_struct_put(part);
		part_stat_unlock();
	}
}

#ifdef CONFIG_PM
/*
 * Don't process normal requests when queue is suspended
 * or in the process of suspending/resuming
 */
static bool blk_pm_allow_request(struct request *rq)
{
	switch (rq->q->rpm_status) {
	case RPM_RESUMING:
	case RPM_SUSPENDING:
		/* only power-management requests may pass while transitioning */
		return rq->rq_flags & RQF_PM;
	case RPM_SUSPENDED:
		return false;
	default:
		return true;
	}
}
#else
static bool blk_pm_allow_request(struct request *rq)
{
	return true;
}
#endif

/* Account the start (or merge) of @rq; takes a partition reference for
 * new IO that blk_account_io_done() later drops. */
void blk_account_io_start(struct request *rq, bool new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(rq->q, cpu, part);
		part_inc_in_flight(rq->q, part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

/*
 * Pick the next dispatchable request off @q's head, honouring runtime-PM
 * gating and flush sequencing; may invoke the legacy elevator's dispatch
 * hook to refill the queue head.  Returns NULL when nothing is dispatchable.
 */
static struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	WARN_ON_ONCE(q->mq_ops);

	while (1) {
		list_for_each_entry(rq, &q->queue_head, queuelist) {
			if (blk_pm_allow_request(rq))
				return rq;

			if (rq->rq_flags & RQF_SOFTBARRIER)
				break;
		}

		/*
		 * Flush request is running and flush request isn't queueable
		 * in the drive, we can hold the queue till flush request is
		 * finished. Even we don't do this, driver can't dispatch next
		 * requests and will requeue them. And this can improve
		 * throughput too. For example, we have request flush1, write1,
		 * flush 2. flush1 is dispatched, then queue is hold, write1
		 * isn't inserted to queue. After flush1 is finished, flush2
		 * will be dispatched. Since disk cache is already clean,
		 * flush2 will be finished very soon, so looks like flush2 is
		 * folded to flush1.
		 * Since the queue is hold, a flag is set to indicate the queue
		 * should be restarted later. Please see flush_end_io() for
		 * details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

/**
 * blk_peek_request - peek at the top of a request queue
 * @q: request queue to peek at
 *
 * Description:
 *     Return the request at the top of @q.  The returned request
 *     should be started using blk_start_request() before LLD starts
 *     processing it.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 */
struct request *blk_peek_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	while ((rq = elv_next_request(q)) != NULL) {
		if (!(rq->rq_flags & RQF_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (rq->rq_flags & RQF_SORTED)
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->rq_flags |= RQF_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		/* already prepped on a previous pass - hand it out as-is */
		if (rq->rq_flags & RQF_DONTPREP)
			break;

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/*
			 * make sure space for the drain appears we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  RQF_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->rq_flags & RQF_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
			rq->rq_flags |= RQF_QUIET;
			/*
			 * Mark this request as started so we don't trigger
			 * any debug logic in the end I/O path.
			 */
			blk_start_request(rq);
			__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
					BLK_STS_TARGET : BLK_STS_IOERR);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_peek_request);

/* Remove @rq from the queue lists and account it as in flight at the
 * driver side. */
static void blk_dequeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and to it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight[rq_is_sync(rq)]++;
}

/**
 * blk_start_request - start request processing on the driver
 * @req: request to dequeue
 *
 * Description:
 *     Dequeue @req and start timeout timer on it.  This hands off the
 *     request to the driver.  Must be called with the queue lock held.
 */
void blk_start_request(struct request *req)
{
	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(req->q->mq_ops);

	blk_dequeue_request(req);

	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
		req->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		req->throtl_size = blk_rq_sectors(req);
#endif
		req->rq_flags |= RQF_STATS;
		rq_qos_issue(req->q, req);
	}

	BUG_ON(blk_rq_is_complete(req));
	blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);

/**
 * blk_fetch_request - fetch a request from a request queue
 * @q: request queue to fetch a request from
 *
 * Description:
 *     Return the request at the top of @q.  The request is started on
 *     return and LLD can start processing it immediately.
 *
 * Return:
 *     Pointer to the request at the top of @q if available.  Null
 *     otherwise.
 */
struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	lockdep_assert_held(q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	rq = blk_peek_request(q);
	if (rq)
		blk_start_request(rq);
	return rq;
}
EXPORT_SYMBOL(blk_fetch_request);

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		/* append rq's whole bio chain to the tail of @list */
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *     The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *     blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		/* advance past this bio only once it is fully consumed */
		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

/*
 * Update both halves of a (possibly bidirectional) request; returns true
 * while either direction still has data pending.
 */
static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_update_request(rq, error, nr_bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(blk_bidi_rq(rq)) &&
	    blk_update_request(rq->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

	return false;
}

/**
 * blk_unprep_request - unprepare a request
 * @req: the request
 *
 * This function makes a request ready for complete resubmission (or
 * completion).  It happens only after all error handling is complete,
 * so represents the appropriate moment to deallocate any resources
 * that were allocated to the request in the prep_rq_fn.  The queue
 * lock is held when calling this.
 */
void blk_unprep_request(struct request *req)
{
	struct request_queue *q = req->q;

	req->rq_flags &= ~RQF_DONTPREP;
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);
}
EXPORT_SYMBOL_GPL(blk_unprep_request);

/*
 * Final teardown of a legacy (non-mq) request: stats, tag release, timer
 * removal, unprep, accounting, and either the end_io callback or putting
 * the request (and its bidi pair) back.  Queue lock must be held.
 */
void blk_finish_request(struct request *req, blk_status_t error)
{
	struct request_queue *q = req->q;
	u64 now = ktime_get_ns();

	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);

	if (req->rq_flags & RQF_STATS)
		blk_stat_add(req, now);

	if (req->rq_flags & RQF_QUEUED)
		blk_queue_end_tag(q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
		laptop_io_completion(req->q->backing_dev_info);

	blk_delete_timer(req);

	if (req->rq_flags & RQF_DONTPREP)
		blk_unprep_request(req);

	blk_account_io_done(req, now);

	if (req->end_io) {
		rq_qos_done(q, req);
		req->end_io(req, error);
	} else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(q, req);
	}
}
EXPORT_SYMBOL(blk_finish_request);

/**
 * blk_end_bidi_request - Complete a bidi request
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
 *     Drivers that supports bidi can safely call this member for any
 *     type of request, bidi or uni.  In the later case @bidi_bytes is
 *     just ignored.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	WARN_ON_ONCE(q->mq_ops);

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	/* fully completed - finish under the queue lock */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/**
 * __blk_end_bidi_request - Complete a bidi request with queue lock held
 * @rq:         the request to complete
 * @error:      block status code
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Description:
 *     Identical to blk_end_bidi_request() except that queue lock is
 *     assumed to be locked on entry and remains so on return.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	WARN_ON_ONCE(rq->q->mq_ops);
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(blk_end_request);

/**
 * blk_end_request_all - Helper function for drives to finish the request.
 * @rq: the request to finish
 * @error: block status code
 *
 * Description:
 *     Completely finish @rq.
 */
void blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
				       bidi_bytes);
	/* completing all bytes must leave nothing pending */
	BUG_ON(pending);
}
EXPORT_SYMBOL(blk_end_request_all);

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
bool __blk_end_request(struct request *rq, blk_status_t error,
		unsigned int nr_bytes)
{
	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL(__blk_end_request);

/**
 * __blk_end_request_all - Helper function for drives to finish the request.
 * @rq: the request to finish
 * @error: block status code
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
void __blk_end_request_all(struct request *rq, blk_status_t error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	lockdep_assert_held(rq->q->queue_lock);
	WARN_ON_ONCE(rq->q->mq_ops);

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
					 bidi_bytes);
	/* completing all bytes must leave nothing pending */
	BUG_ON(pending);
}
EXPORT_SYMBOL(__blk_end_request_all);

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: block status code
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
bool __blk_end_request_cur(struct request *rq, blk_status_t error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
EXPORT_SYMBOL(__blk_end_request_cur);

/* Initialize @rq's data fields from a single @bio. */
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);
	else if (bio_op(bio) == REQ_OP_DISCARD)
		rq->nr_phys_segments = 1;

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *     Check if underlying low-level drivers of a device are busy.
 *     If the drivers want to export their busy state, they must set own
 *     exporting function using blk_queue_lld_busy() first.
 *
 *     Basically, this function is used only by request stacking drivers
 *     to stop dispatching requests to underlying devices when underlying
 *     devices are busy.  This behavior helps more I/O merging on the queue
 *     of the request stacking driver and prevents I/O throughput regression
 *     on burst I/O load.
 *
 * Return:
 *     0 - Not busy (The request stacking driver should dispatch request)
 *     1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/*
 * Copy attributes of the original request to the clone request.
 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
 */
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
		dst->special_vec = src->special_vec;
	}
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
}

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     The actual data parts of @rq_src (e.g. ->cmd, ->sense)
 *     are not copied, and copying such parts is the caller's responsibility.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	__blk_rq_prep_clone(rq, rq_src);

	return 0;

free_and_out:
	/* release the bio that failed @bio_ctr plus everything already chained */
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

/* Queue @work on the block layer's dedicated workqueue. */
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work_on);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug().
 *   This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

/* list_sort comparator: order plugged requests by queue, then by sector. */
static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q < rqb->q ||
		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

/*
 * If 'from_schedule' is true, then postpone the dispatch of requests
 * until a safe kblockd context. We due this to avoid accidental big
 * additional stack usage in driver dispatch, in places where the originally
 * plugger did not intend it.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth,
			    bool from_schedule)
	__releases(q->queue_lock)
{
	lockdep_assert_held(q->queue_lock);

	trace_block_unplug(q, depth, !from_schedule);

	if (from_schedule)
		blk_run_queue_async(q);
	else
		__blk_run_queue(q);
	/* NOTE: releases the queue lock taken by the caller */
	spin_unlock_irq(q->queue_lock);
}

/* Run all registered plug callbacks; loops because a callback may add
 * new entries to plug->cb_list. */
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

/*
 * Find an existing plug callback matching (@unplug, @data), or allocate a
 * zeroed one of @size bytes and register it.  Returns NULL if the task has
 * no plug or allocation fails.
 */
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

/* Dispatch everything accumulated in @plug, batching per queue. */
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request_queue *q;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);

	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	list_sort(NULL, &list, plug_rq_cmp);

	q = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
			 * This drops the queue lock
			 */
			if (q)
				queue_unplugged(q, depth, from_schedule);
			q = rq->q;
			depth = 0;
			spin_lock_irq(q->queue_lock);
		}

		/*
		 * Short-circuit if @q is dead
		 */
		if (unlikely(blk_queue_dying(q))) {
			__blk_end_request_all(rq, BLK_STS_IOERR);
			continue;
		}

		/*
		 * rq is already accounted, so use raw insert
		 */
		if (op_is_flush(rq->cmd_flags))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	/*
	 * This drops the queue lock
	 */
	if (q)
		queue_unplugged(q, depth, from_schedule);
}

void blk_finish_plug(struct blk_plug *plug)
{
	/* only the outermost plug owner may flush and detach it */
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

#ifdef CONFIG_PM
/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start auto suspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized, and its
 *    request queue @q has been allocated, and runtime PM for it can not happen
 *    yet(either due to disabled/forbidden or its usage_count > 0). In most
 *    cases, driver should call this function before any I/O has taken place.
 *
 *    This function takes care of setting up using auto suspend for the device,
 *    the autosuspend delay is set to -1 to make runtime suspend impossible
 *    until an updated value is either set by user or by driver. Drivers do
 *    not need to touch other autosuspend settings.
 *
 *    The block layer runtime PM is request based, so only works for drivers
 *    that use request as their IO unit instead of those directly use bio's.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	/* not support for RQF_PM and ->rpm_status in blk-mq yet */
	if (q->mq_ops)
		return;

	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0         - OK to runtime suspend the device
 *    -EBUSY    - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (q->nr_pending) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDING;
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime suspend function and mark last busy for the device so
 *    that PM core will try to auto suspend the device at a later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		/* suspend failed - fall back to active and retry later */
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_resume function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_resume function. If it is successfully resumed, process
 *    the requests that are queued into the device's queue when it is resuming
 *    and then mark last busy and initiate autosuspend for it.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_ACTIVE;
		__blk_run_queue(q);
		pm_runtime_mark_last_busy(q->dev);
		pm_request_autosuspend(q->dev);
	} else {
		q->rpm_status = RPM_SUSPENDED;
	}
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);

/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend the resume
 * hook typically resumes the device and corrects runtime status
 * accordingly.  However, that does not affect the queue runtime PM status
 * which is still "suspended". This prevents processing requests from the
 * queue.
 *
 * This function can be used in driver's resume hook to correct queue
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before first request is added to the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_mark_last_busy(q->dev);
	pm_request_autosuspend(q->dev);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
#endif

/* One-time block layer initialization: sanity checks, the kblockd
 * workqueue, slab caches and the debugfs root. */
int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			FIELD_SIZEOF(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_545_0
crossvul-cpp_data_good_1390_2
/* libcomps - C alternative to yum.comps library
 * Copyright (C) 2013 Jindrich Luza
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to  Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA
 */

#include "comps_objradix.h"
#include "comps_set.h"
#include <stdio.h>

/* Free one radix-tree node: its key, its owned data object and its subtree. */
void comps_objrtree_data_destroy(COMPS_ObjRTreeData * rtd) {
    free(rtd->key);
    comps_object_destroy(rtd->data);
    comps_hslist_destroy(&rtd->subnodes);
    free(rtd);
}

/* void* adapter used as hslist item destructor. */
inline void comps_objrtree_data_destroy_v(void * rtd) {
    comps_objrtree_data_destroy((COMPS_ObjRTreeData*)rtd);
}

/* Allocate a node holding a copy of the first @keylen bytes of @key.
 * Ownership of @data (may be NULL) is transferred to the node. */
inline COMPS_ObjRTreeData * __comps_objrtree_data_create(char *key,
                                                         size_t keylen,
                                                         COMPS_Object *data){
    COMPS_ObjRTreeData * rtd;
    if ((rtd = malloc(sizeof(*rtd))) == NULL)
        return NULL;
    if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
        free(rtd);
        return NULL;
    }
    memcpy(rtd->key, key, sizeof(char)*keylen);
    rtd->key[keylen] = 0;
    rtd->data = data;
    if (data != NULL) {
        rtd->is_leaf = 1;
    } else {
        /* FIX: is_leaf was left uninitialized for data-less (interior) nodes,
         * i.e. reading indeterminate malloc'd memory later. */
        rtd->is_leaf = 0;
    }
    rtd->subnodes = comps_hslist_create();
    comps_hslist_init(rtd->subnodes, NULL, NULL, &comps_objrtree_data_destroy_v);
    return rtd;
}

/* Node with NUL-terminated key. */
COMPS_ObjRTreeData * comps_objrtree_data_create(char *key, COMPS_Object *data) {
    COMPS_ObjRTreeData * rtd;
    rtd = __comps_objrtree_data_create(key, strlen(key), data);
    return rtd;
}

/* Node with explicit key length (key need not be NUL-terminated). */
COMPS_ObjRTreeData * comps_objrtree_data_create_n(char *key, size_t keylen,
                                                  COMPS_Object *data) {
    COMPS_ObjRTreeData * rtd;
    rtd = __comps_objrtree_data_create(key, keylen, data);
    return rtd;
}

/* COMPS_Object constructor body: empty tree. */
static void comps_objrtree_create(COMPS_ObjRTree *rtree, COMPS_Object **args) {
    (void)args;
    rtree->subnodes = comps_hslist_create();
    comps_hslist_init(rtree->subnodes, NULL, NULL,
                      &comps_objrtree_data_destroy_v);
    if (rtree->subnodes == NULL) {
        COMPS_OBJECT_DESTROY(rtree);
        return;
    }
    rtree->len = 0;
}
void comps_objrtree_create_u(COMPS_Object * obj, COMPS_Object **args) {
    (void)args;
    comps_objrtree_create((COMPS_ObjRTree*)obj, NULL);
}

/* COMPS_Object destructor body: recursively frees all nodes. */
static void comps_objrtree_destroy(COMPS_ObjRTree * rt) {
    comps_hslist_destroy(&(rt->subnodes));
}
void comps_objrtree_destroy_u(COMPS_Object *obj) {
    comps_objrtree_destroy((COMPS_ObjRTree*)obj);
}

/* Deep copy of @rt; node data objects are copied with comps_object_copy.
 * Iterative breadth-first duplication via the to_clone worklist: each
 * appended node initially shares the ORIGINAL subnode list, which is then
 * replaced by a fresh list when the node is popped from the worklist. */
COMPS_ObjRTree * comps_objrtree_clone(COMPS_ObjRTree *rt) {
    COMPS_HSList *to_clone, *tmplist, *new_subnodes;
    COMPS_ObjRTree *ret;
    COMPS_HSListItem *it, *it2;
    COMPS_ObjRTreeData *rtdata;
    COMPS_Object *new_data;

    if (!rt) return NULL;

    to_clone = comps_hslist_create();
    comps_hslist_init(to_clone, NULL, NULL, NULL);

    ret = COMPS_OBJECT_CREATE(COMPS_ObjRTree, NULL);
    ret->len = rt->len;
    for (it = rt->subnodes->first; it != NULL; it = it->next) {
        rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
        if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
            new_data = comps_object_copy(((COMPS_ObjRTreeData*)it->data)->data);
        else
            new_data = NULL;
        comps_hslist_destroy(&rtdata->subnodes);
        rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
        rtdata->data = new_data;
        comps_hslist_append(ret->subnodes, rtdata, 0);
        comps_hslist_append(to_clone, rtdata, 0);
    }

    while (to_clone->first) {
        it2 = to_clone->first;
        tmplist = ((COMPS_ObjRTreeData*)it2->data)->subnodes;
        comps_hslist_remove(to_clone, to_clone->first);

        new_subnodes = comps_hslist_create();
        comps_hslist_init(new_subnodes, NULL, NULL,
                          &comps_objrtree_data_destroy_v);
        for (it = tmplist->first; it != NULL; it = it->next) {
            rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
            if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
                new_data = comps_object_copy(((COMPS_ObjRTreeData*)it->data)->data);
            else
                new_data = NULL;
            comps_hslist_destroy(&rtdata->subnodes);
            rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
            rtdata->data = new_data;
            comps_hslist_append(new_subnodes, rtdata, 0);
            comps_hslist_append(to_clone, rtdata, 0);
        }
        ((COMPS_ObjRTreeData*)it2->data)->subnodes = new_subnodes;
        free(it2);
    }
    comps_hslist_destroy(&to_clone);
    return ret;
}

/* Deep copy rt2 into rt1 (rt1 is (re)initialized; same scheme as clone). */
void comps_objrtree_copy(COMPS_ObjRTree *rt1, COMPS_ObjRTree *rt2){
    COMPS_HSList *to_clone, *tmplist, *new_subnodes;
    COMPS_HSListItem *it, *it2;
    COMPS_ObjRTreeData *rtdata;
    COMPS_Object *new_data;

    rt1->subnodes = comps_hslist_create();
    comps_hslist_init(rt1->subnodes, NULL, NULL,
                      &comps_objrtree_data_destroy_v);
    if (rt1->subnodes == NULL) {
        COMPS_OBJECT_DESTROY(rt1);
        return;
    }
    rt1->len = 0;

    to_clone = comps_hslist_create();
    comps_hslist_init(to_clone, NULL, NULL, NULL);

    for (it = rt2->subnodes->first; it != NULL; it = it->next) {
        rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
        if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
            new_data = comps_object_copy(((COMPS_ObjRTreeData*)it->data)->data);
        else
            new_data = NULL;
        comps_hslist_destroy(&rtdata->subnodes);
        rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
        rtdata->data = new_data;
        comps_hslist_append(rt1->subnodes, rtdata, 0);
        comps_hslist_append(to_clone, rtdata, 0);
    }

    while (to_clone->first) {
        it2 = to_clone->first;
        tmplist = ((COMPS_ObjRTreeData*)it2->data)->subnodes;
        comps_hslist_remove(to_clone, to_clone->first);

        new_subnodes = comps_hslist_create();
        comps_hslist_init(new_subnodes, NULL, NULL,
                          &comps_objrtree_data_destroy_v);
        for (it = tmplist->first; it != NULL; it = it->next) {
            rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
            if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
                new_data = comps_object_copy(((COMPS_ObjRTreeData*)it->data)->data);
            else
                new_data = NULL;
            comps_hslist_destroy(&rtdata->subnodes);
            rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
            rtdata->data = new_data;
            comps_hslist_append(new_subnodes, rtdata, 0);
            comps_hslist_append(to_clone, rtdata, 0);
        }
        ((COMPS_ObjRTreeData*)it2->data)->subnodes = new_subnodes;
        free(it2);
    }
    comps_hslist_destroy(&to_clone);
}
COMPS_COPY_u(objrtree, COMPS_ObjRTree) /*comps_utils.h macro*/

/* Like comps_objrtree_copy but data objects are shared by incref rather
 * than deep-copied. */
void comps_objrtree_copy_shallow(COMPS_ObjRTree *rt1, COMPS_ObjRTree *rt2){
    COMPS_HSList *to_clone, *tmplist, *new_subnodes;
    COMPS_HSListItem *it, *it2;
    COMPS_ObjRTreeData *rtdata;
    COMPS_Object *new_data;

    rt1->subnodes = comps_hslist_create();
    comps_hslist_init(rt1->subnodes, NULL, NULL,
                      &comps_objrtree_data_destroy_v);
    if (rt1->subnodes == NULL) {
        COMPS_OBJECT_DESTROY(rt1);
        return;
    }
    rt1->len = 0;

    to_clone = comps_hslist_create();
    comps_hslist_init(to_clone, NULL, NULL, NULL);

    for (it = rt2->subnodes->first; it != NULL; it = it->next) {
        rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
        if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
            new_data = COMPS_OBJECT_INCREF(((COMPS_ObjRTreeData*)it->data)->data);
        else
            new_data = NULL;
        comps_hslist_destroy(&rtdata->subnodes);
        rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
        rtdata->data = new_data;
        comps_hslist_append(rt1->subnodes, rtdata, 0);
        comps_hslist_append(to_clone, rtdata, 0);
    }

    while (to_clone->first) {
        it2 = to_clone->first;
        tmplist = ((COMPS_ObjRTreeData*)it2->data)->subnodes;
        comps_hslist_remove(to_clone, to_clone->first);

        new_subnodes = comps_hslist_create();
        comps_hslist_init(new_subnodes, NULL, NULL,
                          &comps_objrtree_data_destroy_v);
        for (it = tmplist->first; it != NULL; it = it->next) {
            rtdata = comps_objrtree_data_create(
                                    ((COMPS_ObjRTreeData*)it->data)->key, NULL);
            if (((COMPS_ObjRTreeData*)it->data)->data != NULL)
                new_data = comps_object_incref(((COMPS_ObjRTreeData*)it->data)->data);
            else
                new_data = NULL;
            comps_hslist_destroy(&rtdata->subnodes);
            rtdata->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
            rtdata->data = new_data;
            comps_hslist_append(new_subnodes, rtdata, 0);
            comps_hslist_append(to_clone, rtdata, 0);
        }
        ((COMPS_ObjRTreeData*)it2->data)->subnodes = new_subnodes;
        free(it2);
    }
    comps_hslist_destroy(&to_clone);
}

/* Invoke walk_f(udata, data) for every stored value, breadth-first. */
void comps_objrtree_values_walk(COMPS_ObjRTree * rt, void* udata,
                              void (*walk_f)(void*, COMPS_Object*)) {
    COMPS_HSList *tmplist, *tmp_subnodes;
    COMPS_HSListItem *it;
    tmplist = comps_hslist_create();
    comps_hslist_init(tmplist, NULL, NULL, NULL);
    comps_hslist_append(tmplist, rt->subnodes, 0);
    while (tmplist->first != NULL) {
        it = tmplist->first;
        comps_hslist_remove(tmplist, tmplist->first);
        tmp_subnodes = (COMPS_HSList*)it->data;
        for (it = tmp_subnodes->first; it != NULL; it=it->next) {
            if (((COMPS_ObjRTreeData*)it->data)->subnodes->first) {
                comps_hslist_append(tmplist,
                                    ((COMPS_ObjRTreeData*)it->data)->subnodes, 0);
            }
            if (((COMPS_ObjRTreeData*)it->data)->data != NULL) {
                walk_f(udata, ((COMPS_ObjRTreeData*)it->data)->data);
            }
        }
    }
    comps_hslist_destroy(&tmplist);
}

/* Set-membership comparator for (key, data) pairs: 1 when keys match and
 * the data objects compare equal. */
char comps_objrtree_paircmp(void *obj1, void *obj2) {
    //printf("comparing %s with %s\n", ((COMPS_ObjRTreePair*)obj1)->key,
    //                                 ((COMPS_ObjRTreePair*)obj2)->key);
    if (strcmp(((COMPS_ObjRTreePair*)obj1)->key,
               ((COMPS_ObjRTreePair*)obj2)->key) != 0)
        return 0;
    return comps_object_cmp(((COMPS_ObjRTreePair*)obj1)->data,
                            ((COMPS_ObjRTreePair*)obj2)->data);
}

/* Compare two trees as unordered sets of (key, data) pairs.
 * NOTE(review): returns ret==0, i.e. truth value semantics follow
 * comps_set_cmp's convention — confirm against COMPS_CMP_u users. */
signed char comps_objrtree_cmp(COMPS_ObjRTree *ort1, COMPS_ObjRTree *ort2) {
    COMPS_HSList *values1, *values2;
    COMPS_HSListItem *it;
    COMPS_Set *set1, *set2;
    signed char ret;

    values1 = comps_objrtree_pairs(ort1);
    values2 = comps_objrtree_pairs(ort2);

    set1 = comps_set_create();
    comps_set_init(set1, NULL, NULL, NULL, &comps_objrtree_paircmp);
    set2 = comps_set_create();
    comps_set_init(set2, NULL, NULL, NULL, &comps_objrtree_paircmp);

    for (it = values1->first; it != NULL; it = it->next) {
        comps_set_add(set1, it->data);
    }
    for (it = values2->first; it != NULL; it = it->next) {
        comps_set_add(set2, it->data);
    }
    ret = comps_set_cmp(set1, set2);
    comps_set_destroy(&set1);
    comps_set_destroy(&set2);
    //printf("objrtree cmp %d\n", !ret);
    //char *str;
    /*for (it = values1->first; it != NULL; it = it->next) {
        str = comps_object_tostr(((COMPS_ObjRTreePair*)it->data)->data);
        printf("dict item %s=%s\n", ((COMPS_ObjRTreePair*)it->data)->key, str);
        free(str);
    }
    printf("----------\n");
    for (it = values2->first; it != NULL; it = it->next) {
        str = comps_object_tostr(((COMPS_ObjRTreePair*)it->data)->data);
        printf("dict item %s=%s\n", ((COMPS_ObjRTreePair*)it->data)->key, str);
        free(str);
    }
    printf("cmp objrtree ret:%d\n", ret);*/
    comps_hslist_destroy(&values1);
    comps_hslist_destroy(&values2);
    return ret==0;
}
COMPS_CMP_u(objrtree, COMPS_ObjRTree)

/* Core radix insert of the first @len bytes of @key mapping to @ndata
 * (ownership transferred; an existing value for an equal key is destroyed
 * and replaced). Walks the tree comparing key fragments; may split an
 * existing node when the new key diverges mid-fragment. */
void __comps_objrtree_set(COMPS_ObjRTree *rt, char *key, size_t len,
                          COMPS_Object *ndata) {
    COMPS_HSListItem *it, *lesser;
    COMPS_HSList *subnodes;
    COMPS_ObjRTreeData *rtd;
    /* FIX: was declared `static` — the pointer is always assigned before
     * use, so the static storage only made the function non-reentrant. */
    COMPS_ObjRTreeData *rtdata;

    size_t _len, offset=0;
    unsigned x, found = 0;
    char ended;
    //len = strlen(key);

    if (rt->subnodes == NULL)
        return;

    subnodes = rt->subnodes;
    while (offset != len) {
        found = 0;
        lesser = NULL;
        /* Find child whose fragment starts with key[offset]; remember the
         * last lexically-smaller child to keep siblings ordered. */
        for (it = subnodes->first; it != NULL; it=it->next) {
            if (((COMPS_ObjRTreeData*)it->data)->key[0] == key[offset]) {
                found = 1;
                break;
            } else if (((COMPS_ObjRTreeData*)it->data)->key[0] < key[offset]) {
                lesser = it;
            }
        }
        if (!found) { // not found in subnodes; create new subnode
            rtd = comps_objrtree_data_create_n(key+offset, len-offset, ndata);
            if (!lesser) {
                comps_hslist_prepend(subnodes, rtd, 0);
            } else {
                comps_hslist_insert_after(subnodes, lesser, rtd, 0);
            }
            rt->len++;
            return;
        } else {
            rtdata = (COMPS_ObjRTreeData*)it->data;

            /* Compare remaining key against the node fragment.
             * ended: 1 = fragment exhausted, 2 = key exhausted, 3 = both. */
            ended = 0;
            for (x=1; ;x++) {
                if (rtdata->key[x] == 0) ended += 1;
                if (x == len - offset) ended += 2;
                if (ended != 0) break;
                if (key[offset+x] != rtdata->key[x]) break;
            }
            if (ended == 3) { //keys equals; data replacement
                comps_object_destroy(rtdata->data);
                rtdata->data = ndata;
                return;
            } else if (ended == 2) { //global key ends first; make global leaf
                //printf("ended2\n");
                comps_hslist_remove(subnodes, it);
                it->next = NULL;
                /* New node for the common prefix; the old node becomes its
                 * only child with the prefix stripped off its key. */
                rtd = comps_objrtree_data_create_n(key+offset, len-offset, ndata);
                comps_hslist_append(subnodes, rtd, 0);
                ((COMPS_ObjRTreeData*)subnodes->last->data)->subnodes->last = it;
                ((COMPS_ObjRTreeData*)subnodes->last->data)->subnodes->first = it;
                _len = len - offset;
                memmove(rtdata->key,rtdata->key+_len,
                                    strlen(rtdata->key) - _len);
                rtdata->key[strlen(rtdata->key) - _len] = 0;
                rtdata->key = realloc(rtdata->key,
                                      sizeof(char)* (strlen(rtdata->key)+1));
                rt->len++;
                return;
            } else if (ended == 1) { //local key ends first; go deeper
                subnodes = rtdata->subnodes;
                offset += x;
            } else {
                COMPS_Object *tmpdata = rtdata->data;
                COMPS_HSList *tmphslist = rtdata->subnodes;
                //tmpch = rtdata->key[x];             // split mutual key
                /* Keys diverge at x: split the node into a prefix node with
                 * two children, kept in lexical order. */
                rtdata->subnodes = comps_hslist_create();
                comps_hslist_init(rtdata->subnodes, NULL, NULL,
                                  &comps_objrtree_data_destroy_v);
                int cmpret = strcmp(key+offset+x, rtdata->key+x);
                rtdata->data = NULL;

                if (cmpret > 0) {
                    rtd = comps_objrtree_data_create(rtdata->key+x,
                                                  tmpdata);
                    comps_hslist_destroy(&rtd->subnodes);
                    rtd->subnodes = tmphslist;
                    comps_hslist_append(rtdata->subnodes,rtd, 0);
                    rtd = comps_objrtree_data_create(key+offset+x, ndata);
                    comps_hslist_append(rtdata->subnodes, rtd, 0);
                } else {
                    rtd = comps_objrtree_data_create(key+offset+x, ndata);
                    comps_hslist_append(rtdata->subnodes, rtd, 0);
                    rtd = comps_objrtree_data_create(rtdata->key+x,
                                                  tmpdata);
                    comps_hslist_destroy(&rtd->subnodes);
                    rtd->subnodes = tmphslist;
                    comps_hslist_append(rtdata->subnodes, rtd, 0);
                }
                rtdata->key = realloc(rtdata->key, sizeof(char)*(x+1));
                rtdata->key[x] = 0;
                rt->len++;
                return;
            }
        }
    }
}

/* Insert taking ownership of @data (no incref). */
void comps_objrtree_set_x(COMPS_ObjRTree *rt, char *key, COMPS_Object *data) {
    __comps_objrtree_set(rt, key, strlen(key), data);
}
/* Insert sharing @data (increfs before storing). */
void comps_objrtree_set(COMPS_ObjRTree *rt, char *key, COMPS_Object *data) {
    __comps_objrtree_set(rt, key, strlen(key), comps_object_incref(data));
}
/* Length-bounded variants of the above two. */
void comps_objrtree_set_n(COMPS_ObjRTree *rt, char *key, size_t len,
                          COMPS_Object *data) {
    __comps_objrtree_set(rt, key, len, data);
}
void comps_objrtree_set_nx(COMPS_ObjRTree *rt, char *key, size_t len,
                          COMPS_Object *data) {
    __comps_objrtree_set(rt, key, len, comps_object_incref(data));
}

/* Look up @key; returns a borrowed reference (no incref) or NULL. */
COMPS_Object* __comps_objrtree_get(COMPS_ObjRTree * rt, const char * key) {
    COMPS_HSList * subnodes;
    COMPS_HSListItem * it = NULL;
    COMPS_ObjRTreeData * rtdata;
    unsigned int offset, len, x;
    char found, ended;

    len = strlen(key);
    offset = 0;
    subnodes = rt->subnodes;
    while (offset != len) {
        found = 0;
        for (it = subnodes->first; it != NULL; it=it->next) {
            if (((COMPS_ObjRTreeData*)it->data)->key[0] == key[offset]) {
                found = 1;
                break;
            }
        }
        if (!found) {
            return NULL;
        }
        rtdata = (COMPS_ObjRTreeData*)it->data;

        for (x=1; ;x++) {
            ended=0;
            if (x == strlen(rtdata->key)) ended += 1;
            if (x == len-offset) ended += 2;
            if (ended != 0) break;
            if (key[offset+x] != rtdata->key[x]) break;
        }
        if (ended == 3) { /* exact match on this node */
            return rtdata->data;
        }
        else if (ended == 1) offset+=x;
        else {
            return NULL;
        }
        subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
    }
    /* Reached only when key is empty (offset == len at entry). */
    if (it != NULL) {
        return ((COMPS_ObjRTreeData*)it->data)->data;
    } else {
        return NULL;
    }
}
/* Look up returning a NEW reference (incref'd; NULL-safe). */
COMPS_Object* comps_objrtree_get(COMPS_ObjRTree * rt, const char * key) {
    return comps_object_incref(__comps_objrtree_get(rt, key));
}
/* Look up returning a borrowed reference. */
COMPS_Object* comps_objrtree_get_x(COMPS_ObjRTree * rt, const char * key) {
    return __comps_objrtree_get(rt, key);
}

/* Remove @key from the tree, destroying its value and pruning now-childless
 * ancestor nodes along the recorded path. */
void comps_objrtree_unset(COMPS_ObjRTree * rt, const char * key) {
    COMPS_HSList * subnodes;
    COMPS_HSListItem * it;
    COMPS_ObjRTreeData * rtdata;
    unsigned int offset, len, x;
    char found, ended;
    COMPS_HSList * path;

    /* Breadcrumb: (parent list, child item) per descended level. */
    struct Relation {
        COMPS_HSList * parent_nodes;
        COMPS_HSListItem * child_it;
    } *relation;

    path = comps_hslist_create();
    comps_hslist_init(path, NULL, NULL, &free);

    len = strlen(key);
    offset = 0;
    subnodes = rt->subnodes;
    while (offset != len) {
        found = 0;
        for (it = subnodes->first; it != NULL; it=it->next) {
            if (((COMPS_ObjRTreeData*)it->data)->key[0] == key[offset]) {
                found = 1;
                break;
            }
        }
        if (!found) {
            comps_hslist_destroy(&path);
            return;
        }
        rtdata = (COMPS_ObjRTreeData*)it->data;

        for (x=1; ;x++) {
            ended=0;
            if (rtdata->key[x] == 0) ended += 1;
            if (x == len - offset) ended += 2;
            if (ended != 0) break;
            if (key[offset+x] != rtdata->key[x]) break;
        }
        if (ended == 3) {
            /* remove node from tree only if there's no descendant*/
            if (rtdata->subnodes->last == NULL) {
                //printf("removing all\n");
                comps_hslist_remove(subnodes, it);
                comps_objrtree_data_destroy(rtdata);
                free(it);
            }
            else {
                //printf("removing data only\n");
                comps_object_destroy(rtdata->data);
                rtdata->is_leaf = 0;
                rtdata->data = NULL;
            }

            if (path->last == NULL) {
                comps_hslist_destroy(&path);
                return;
            }
            rtdata = (COMPS_ObjRTreeData*)
                     ((struct Relation*)path->last->data)->child_it->data;

            /*remove all predecessor of deleted node (recursive) with no childs*/
            /* NOTE(review): this loop dereferences path->last without a NULL
             * check after popping — presumably the root level always retains
             * children; verify for single-branch trees. */
            while (rtdata->subnodes->last == NULL) {
                //printf("removing '%s'\n", rtdata->key);
                comps_objrtree_data_destroy(rtdata);
                comps_hslist_remove(
                            ((struct Relation*)path->last->data)->parent_nodes,
                            ((struct Relation*)path->last->data)->child_it);
                free(((struct Relation*)path->last->data)->child_it);
                it = path->last;
                comps_hslist_remove(path, path->last);
                free(it);
                rtdata = (COMPS_ObjRTreeData*)
                         ((struct Relation*)path->last->data)->child_it->data;
            }
            comps_hslist_destroy(&path);
            return;
        }
        else if (ended == 1) offset+=x;
        else {
            comps_hslist_destroy(&path);
            return;
        }
        if ((relation = malloc(sizeof(struct Relation))) == NULL) {
            comps_hslist_destroy(&path);
            return;
        }
        subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;
        relation->parent_nodes = subnodes;
        relation->child_it = it;
        comps_hslist_append(path, (void*)relation, 0);
    }
    comps_hslist_destroy(&path);
    return;
}

/* Drop all top-level subnode items of @rt.
 * NOTE(review): items are COMPS_ObjRTreeData*, yet they are released with
 * comps_object_destroy and list head/len are left stale — confirm intended
 * usage with callers before relying on the tree after clear(). */
void comps_objrtree_clear(COMPS_ObjRTree * rt) {
    COMPS_HSListItem *it, *oldit;
    if (rt==NULL) return;
    if (rt->subnodes == NULL) return;
    oldit = rt->subnodes->first;
    it = (oldit)?oldit->next:NULL;
    for (;it != NULL; it=it->next) {
        comps_object_destroy(oldit->data);
        free(oldit);
        oldit = it;
    }
    if (oldit) {
        comps_object_destroy(oldit->data);
        free(oldit);
    }
}

/* Enumerate the tree, returning a list of keys (keyvalpair==0), values (1)
 * or COMPS_ObjRTreePair items (2). Keys are rebuilt by concatenating node
 * fragments while walking with an explicit worklist. */
inline COMPS_HSList* __comps_objrtree_all(COMPS_ObjRTree * rt, char keyvalpair) {
    COMPS_HSList *to_process, *ret;
    COMPS_HSListItem *hsit, *oldit;
    size_t x;
    struct Pair {
        char *key;
        void *data;
        COMPS_HSList *subnodes;
    } *pair, *current_pair=NULL;//, *oldpair=NULL;
    COMPS_ObjRTreePair *rtpair;

    to_process = comps_hslist_create();
    comps_hslist_init(to_process, NULL, NULL, &free);

    ret = comps_hslist_create();
    if (keyvalpair == 0)
        comps_hslist_init(ret, NULL, NULL, &free);
    else if (keyvalpair == 1)
        comps_hslist_init(ret, NULL, NULL, NULL);
    else
        comps_hslist_init(ret, NULL, NULL, &comps_objrtree_pair_destroy_v);

    for (hsit = rt->subnodes->first; hsit != NULL; hsit = hsit->next) {
        pair = malloc(sizeof(struct Pair));
        pair->key = __comps_strcpy(((COMPS_ObjRTreeData*)hsit->data)->key);
        pair->data = ((COMPS_ObjRTreeData*)hsit->data)->data;
        pair->subnodes = ((COMPS_ObjRTreeData*)hsit->data)->subnodes;
        comps_hslist_append(to_process, pair, 0);
    }
    while (to_process->first) {
        //oldpair = current_pair;
        current_pair = to_process->first->data;
        oldit = to_process->first;
        comps_hslist_remove(to_process, to_process->first);
        if (current_pair->data) {
            if (keyvalpair == 0) {
                comps_hslist_append(ret,
                                    __comps_strcpy(current_pair->key), 0);
            } else if (keyvalpair == 1) {
                comps_hslist_append(ret, current_pair->data, 0);
            } else {
                rtpair = malloc(sizeof(COMPS_ObjRTreePair));
                rtpair->key = __comps_strcpy(current_pair->key);
                rtpair->data = current_pair->data;
                comps_hslist_append(ret, rtpair, 0);
            }
        }
        /* Children are inserted at the front (depth-first-ish order),
         * each with the accumulated key prefix. */
        for (hsit = current_pair->subnodes->first, x = 0;
             hsit != NULL; hsit = hsit->next, x++) {
            pair = malloc(sizeof(struct Pair));
            pair->key = __comps_strcat(current_pair->key,
                                       ((COMPS_ObjRTreeData*)hsit->data)->key);
            pair->data = ((COMPS_ObjRTreeData*)hsit->data)->data;
            pair->subnodes = ((COMPS_ObjRTreeData*)hsit->data)->subnodes;
            comps_hslist_insert_at(to_process, x, pair, 0);
        }
        free(current_pair->key);
        free(current_pair);
        free(oldit);
    }
    comps_hslist_destroy(&to_process);
    return ret;
}

/* Merge rt2's (key, value) pairs into rt1 (values shared via set's incref). */
void comps_objrtree_unite(COMPS_ObjRTree *rt1, COMPS_ObjRTree *rt2) {
    COMPS_HSList *tmplist, *tmp_subnodes;
    COMPS_HSListItem *it;
    struct Pair {
        COMPS_HSList * subnodes;
        char * key;
    } *pair, *parent_pair;

    pair = malloc(sizeof(struct Pair));
    pair->subnodes = rt2->subnodes;
    pair->key = NULL;

    tmplist = comps_hslist_create();
    comps_hslist_init(tmplist, NULL, NULL, &free);

    comps_hslist_append(tmplist, pair, 0);

    while (tmplist->first != NULL) {
        it = tmplist->first;
        comps_hslist_remove(tmplist, tmplist->first);
        tmp_subnodes = ((struct Pair*)it->data)->subnodes;
        parent_pair = (struct Pair*) it->data;
        //printf("key-part:%s\n", parent_pair->key);
        free(it);

        for (it = tmp_subnodes->first; it != NULL; it=it->next) {
            pair = malloc(sizeof(struct Pair));
            pair->subnodes = ((COMPS_ObjRTreeData*)it->data)->subnodes;

            /* Rebuild the full key: parent prefix + node fragment. */
            if (parent_pair->key != NULL) {
                pair->key = malloc(sizeof(char)
                               * (strlen(((COMPS_ObjRTreeData*)it->data)->key)
                               + strlen(parent_pair->key) + 1));
                memcpy(pair->key, parent_pair->key,
                       sizeof(char) * strlen(parent_pair->key));
                memcpy(pair->key + strlen(parent_pair->key),
                       ((COMPS_ObjRTreeData*)it->data)->key,
                       sizeof(char)*(strlen(((COMPS_ObjRTreeData*)it->data)->key)+1));
            } else {
                pair->key = malloc(sizeof(char)*
                                (strlen(((COMPS_ObjRTreeData*)it->data)->key)
                                +1));
                memcpy(pair->key, ((COMPS_ObjRTreeData*)it->data)->key,
                       sizeof(char)*(strlen(((COMPS_ObjRTreeData*)it->data)->key)+1));
            }
            /* current node has data */
            if (((COMPS_ObjRTreeData*)it->data)->data != NULL) {
                comps_objrtree_set(rt1, pair->key,
                                  (((COMPS_ObjRTreeData*)it->data)->data));
            }
            if (((COMPS_ObjRTreeData*)it->data)->subnodes->first) {
                comps_hslist_append(tmplist, pair, 0);
            } else {
                free(pair->key);
                free(pair);
            }
        }
        free(parent_pair->key);
        free(parent_pair);
    }
    comps_hslist_destroy(&tmplist);
}

/* Return a new tree containing the union of rt1 and rt2 (rt2 wins on
 * duplicate keys, as unite overwrites). */
COMPS_ObjRTree* comps_objrtree_union(COMPS_ObjRTree *rt1, COMPS_ObjRTree *rt2){
    COMPS_ObjRTree *ret;
    ret = comps_objrtree_clone(rt1);
    comps_objrtree_unite(ret, rt2);
    return ret;
}
/* List of all full keys stored in the tree (caller frees list). */
COMPS_HSList* comps_objrtree_keys(COMPS_ObjRTree * rt) {
    return __comps_objrtree_all(rt, 0);
}

/* List of all stored values; items are borrowed references. */
COMPS_HSList* comps_objrtree_values(COMPS_ObjRTree * rt) {
    return __comps_objrtree_all(rt, 1);
}

/* List of (key, value) COMPS_ObjRTreePair items; keys are owned copies. */
COMPS_HSList* comps_objrtree_pairs(COMPS_ObjRTree * rt) {
    return __comps_objrtree_all(rt, 2);
}

/* Free a pair: key copy is owned, the data pointer is not released here. */
inline void comps_objrtree_pair_destroy(COMPS_ObjRTreePair * pair) {
    free(pair->key);
    free(pair);
}

/* void* adapter used as hslist item destructor. */
inline void comps_objrtree_pair_destroy_v(void * pair) {
    free(((COMPS_ObjRTreePair *)pair)->key);
    free(pair);
}

/* COMPS_Object vtable for COMPS_ObjRTree. */
COMPS_ObjectInfo COMPS_ObjRTree_ObjInfo = {
    .obj_size = sizeof(COMPS_ObjRTree),
    .constructor = &comps_objrtree_create_u,
    .destructor = &comps_objrtree_destroy_u,
    .copy = &comps_objrtree_copy_u,
    .obj_cmp = &comps_objrtree_cmp_u
};
./CrossVul/dataset_final_sorted/CWE-416/c/good_1390_2
crossvul-cpp_data_bad_4797_1
/* * History: * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), * to allow user process control of SCSI devices. * Development Sponsored by Killy Corp. NY NY * * Original driver (sg.c): * Copyright (C) 1992 Lawrence Foard * Version 2 and 3 extensions to driver: * Copyright (C) 1998 - 2014 Douglas Gilbert * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * */ static int sg_version_num = 30536; /* 2 digits for each component */ #define SG_VERSION_STR "3.5.36" /* * D. P. Gilbert (dgilbert@interlog.com), notes: * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First * the kernel/module needs to be built with CONFIG_SCSI_LOGGING * (otherwise the macros compile to empty statements). * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/moduleparam.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/blktrace_api.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/uio.h> #include "scsi.h" #include <scsi/scsi_dbg.h> #include <scsi/scsi_host.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_ioctl.h> #include <scsi/sg.h> #include "scsi_logging.h" #ifdef CONFIG_SCSI_PROC_FS #include <linux/proc_fs.h> static char *sg_version_date = "20140603"; static int sg_proc_init(void); static void sg_proc_cleanup(void); #endif #define SG_ALLOW_DIO_DEF 0 #define SG_MAX_DEVS 32768 /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type * of 
sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater * than 16 bytes are "variable length" whose length is a multiple of 4 */ #define SG_MAX_CDB_SIZE 252 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) int sg_big_buff = SG_DEF_RESERVED_SIZE; /* N.B. This variable is readable and writeable via /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer of this size (or less if there is not enough memory) will be reserved for use by this file descriptor. [Deprecated usage: this variable is also readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into the kernel (i.e. it is not a module).] */ static int def_reserved_size = -1; /* picks up init parameter */ static int sg_allow_dio = SG_ALLOW_DIO_DEF; static int scatter_elem_sz = SG_SCATTER_SZ; static int scatter_elem_sz_prev = SG_SCATTER_SZ; #define SG_SECTOR_SZ 512 static int sg_add_device(struct device *, struct class_interface *); static void sg_remove_device(struct device *, struct class_interface *); static DEFINE_IDR(sg_index_idr); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add_device, .remove_dev = sg_remove_device, }; typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ unsigned bufflen; /* Size of (aggregate) data buffer */ struct page **pages; int page_order; char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ unsigned char cmd_opcode; /* first byte of command */ } Sg_scatter_hold; struct sg_device; /* forward declarations */ struct sg_fd; typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ struct sg_request *nextrp; /* NULL -> tail request (slist) */ struct sg_fd *parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, 
perhaps scatter list */ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ char orphan; /* 1 -> drop on sight, 0 -> normal */ char sg_io_owned; /* 1 -> packet belongs to SG_IO */ /* done protected by rq_list_lock */ char done; /* 0->before bh, 1->before read, 2->read */ struct request *rq; struct bio *bio; struct execute_work ew; } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ struct list_head sfd_siblings; /* protected by device's sfd_lock */ struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ unsigned save_scat_len; /* original length of trunc. scat. 
element */ Sg_request *headrp; /* head of request slist, NULL->empty */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ struct mutex open_rel_lock; /* held when in open() or release() */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ struct list_head sfds; rwlock_t sfd_lock; /* protect access to sfd list */ atomic_t detaching; /* 0->device usable, 1->device detaching */ bool exclude; /* 1->open(O_EXCL) succeeded and is active */ int open_cnt; /* count of opens (perhaps < num(sfds) ) */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static void sg_rq_end_io(struct request *rq, int uptodate); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int 
sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static int sg_res_in_use(Sg_fd * sfp); static Sg_device *sg_get_dev(int dev); static void sg_device_destroy(struct kref *kref); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) 
\ sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } static int open_wait(Sg_device *sdp, int flags) { int retval = 0; if (flags & O_EXCL) { while (sdp->open_cnt > 0) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->open_cnt)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } else { while (sdp->exclude) { mutex_unlock(&sdp->open_rel_lock); retval = wait_event_interruptible(sdp->open_wait, (atomic_read(&sdp->detaching) || !sdp->exclude)); mutex_lock(&sdp->open_rel_lock); if (retval) /* -ERESTARTSYS */ return retval; if (atomic_read(&sdp->detaching)) return -ENODEV; } } return retval; } /* Returns 0 on success, else a negated errno value */ static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; int retval; nonseekable_open(inode, filp); if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) return -EPERM; /* Can't lock it with read only access */ sdp = sg_get_dev(dev); if (IS_ERR(sdp)) return PTR_ERR(sdp); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_open: flags=0x%x\n", flags)); /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ retval = scsi_device_get(sdp->device); if (retval) goto sg_put; retval = scsi_autopm_get_device(sdp->device); if (retval) goto sdp_put; /* scsi_block_when_processing_errors() may block so bypass * check if O_NONBLOCK. Permits SCSI commands to be issued * during error recovery. Tread carefully. 
*/ if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; /* we are in error recovery for this device */ goto error_out; } mutex_lock(&sdp->open_rel_lock); if (flags & O_NONBLOCK) { if (flags & O_EXCL) { if (sdp->open_cnt > 0) { retval = -EBUSY; goto error_mutex_locked; } } else { if (sdp->exclude) { retval = -EBUSY; goto error_mutex_locked; } } } else { retval = open_wait(sdp, flags); if (retval) /* -ERESTARTSYS or -ENODEV */ goto error_mutex_locked; } /* N.B. at this point we are holding the open_rel_lock */ if (flags & O_EXCL) sdp->exclude = true; if (sdp->open_cnt < 1) { /* no existing opens */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } sfp = sg_add_sfp(sdp); if (IS_ERR(sfp)) { retval = PTR_ERR(sfp); goto out_undo; } filp->private_data = sfp; sdp->open_cnt++; mutex_unlock(&sdp->open_rel_lock); retval = 0; sg_put: kref_put(&sdp->d_ref, sg_device_destroy); return retval; out_undo: if (flags & O_EXCL) { sdp->exclude = false; /* undo if error */ wake_up_interruptible(&sdp->open_wait); } error_mutex_locked: mutex_unlock(&sdp->open_rel_lock); error_out: scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); goto sg_put; } /* Release resources associated with a successful sg_open() * Returns 0 on success, else a negated errno value */ static int sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); mutex_lock(&sdp->open_rel_lock); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exlude clearing, start many; * only open(O_EXCL)s wait on 0==open_cnt so only start one */ if (sdp->exclude) { sdp->exclude = false; wake_up_interruptible_all(&sdp->open_wait); } else if (0 == sdp->open_cnt) { 
wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); return 0; } static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int req_pack_id = -1; sg_io_hdr_t *hp; struct sg_header *old_hdr = NULL; int retval = 0; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; if (sfp->force_packid && (count >= SZ_SG_HEADER)) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (!old_hdr) return -ENOMEM; if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } if (old_hdr->reply_len < 0) { if (count >= SZ_SG_IO_HDR) { sg_io_hdr_t *new_hdr; new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); if (!new_hdr) { retval = -ENOMEM; goto free_old_hdr; } retval =__copy_from_user (new_hdr, buf, SZ_SG_IO_HDR); req_pack_id = new_hdr->pack_id; kfree(new_hdr); if (retval) { retval = -EFAULT; goto free_old_hdr; } } } else req_pack_id = old_hdr->pack_id; } srp = sg_get_rq_mark(sfp, req_pack_id); if (!srp) { /* now wait on packet to arrive */ if (atomic_read(&sdp->detaching)) { retval = -ENODEV; goto free_old_hdr; } if (filp->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto free_old_hdr; } retval = wait_event_interruptible(sfp->read_wait, (atomic_read(&sdp->detaching) || (srp = sg_get_rq_mark(sfp, req_pack_id)))); if (atomic_read(&sdp->detaching)) { retval = -ENODEV; goto free_old_hdr; } if (retval) { /* -ERESTARTSYS as signal hit process */ goto free_old_hdr; } } if (srp->header.interface_id != '\0') { retval = sg_new_read(sfp, buf, count, srp); goto free_old_hdr; } hp = &srp->header; if (old_hdr == NULL) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (! 
old_hdr) { retval = -ENOMEM; goto free_old_hdr; } } memset(old_hdr, 0, SZ_SG_HEADER); old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 
0 : -EIO; sg_finish_rem_req(srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0, err2; int len; if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? sb_len : len; if (copy_to_user(hp->sbp, srp->sense_b, len)) { err = -EFAULT; goto err_out; } hp->sb_len_wr = len; } } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { err = -EFAULT; goto err_out; } err_out: err2 = sg_finish_rem_req(srp); return err ? : err2 ? : count; } static ssize_t sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_write: count=%d\n", (int) count)); if (atomic_read(&sdp->detaching)) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) 
return -EIO; /* The minimum scsi command length is 6 bytes. */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } buf += SZ_SG_HEADER; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (__copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. 
*/ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { static char cmd[TASK_COMM_LEN]; if (strcmp(current->comm, cmd)) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); strcpy(cmd, current->comm); } } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sg_res_in_use(sfp)) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? 
ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; /* protects following copy_from_user()s + get_user()s */ } if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, at_head; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { if (srp->bio) { if (srp->rq->cmd != srp->rq->__cmd) kfree(srp->rq->cmd); blk_end_request_all(srp->rq, -EIO); srp->rq = NULL; } sg_finish_rem_req(srp); return -ENODEV; } hp->duration = jiffies_to_msecs(jiffies); if (hp->interface_id != '\0' && /* v3 (or later) interface */ (SG_FLAG_Q_AT_TAIL & hp->flags)) at_head = 0; else at_head = 1; srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
*/ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, at_head, sg_rq_end_io); return 0; } static int srp_done(Sg_fd *sfp, Sg_request *srp) { unsigned long flags; int ret; read_lock_irqsave(&sfp->rq_list_lock, flags); ret = srp->done; read_unlock_irqrestore(&sfp->rq_list_lock, flags); return ret; } static int max_sectors_bytes(struct request_queue *q) { unsigned int max_sectors = queue_max_sectors(q); max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); return max_sectors << 9; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; unsigned long iflags; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch (cmd_in) { case SG_IO: if (atomic_read(&sdp->detaching)) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, (srp_done(sfp, srp) || atomic_read(&sdp->detaching))); if (atomic_read(&sdp->detaching)) return -ENODEV; write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; write_unlock_irq(&sfp->rq_list_lock); result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); return (result < 0) ? 
result : 0; } srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ case SG_SET_TIMEOUT: result = get_user(val, ip); if (result) return result; if (val < 0) return -EIO; if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ)) val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ), INT_MAX); sfp->timeout_user = val; sfp->timeout = mult_frac(val, HZ, USER_HZ); return 0; case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: result = get_user(val, ip); if (result) return result; if (val) { sfp->low_dma = 1; if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { val = (int) sfp->reserve.bufflen; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } } else { if (atomic_read(&sdp->detaching)) return -ENODEV; sfp->low_dma = sdp->device->host->unchecked_isa_dma; } return 0; case SG_GET_LOW_DMA: return put_user((int) sfp->low_dma, ip); case SG_GET_SCSI_ID: if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) return -EFAULT; else { sg_scsi_id_t __user *sg_idp = p; if (atomic_read(&sdp->detaching)) return -ENODEV; __put_user((int) sdp->device->host->host_no, &sg_idp->host_no); __put_user((int) sdp->device->channel, &sg_idp->channel); __put_user((int) sdp->device->id, &sg_idp->scsi_id); __put_user((int) sdp->device->lun, &sg_idp->lun); __put_user((int) sdp->device->type, &sg_idp->scsi_type); __put_user((short) sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun); __put_user((short) sdp->device->queue_depth, &sg_idp->d_queue_depth); __put_user(0, &sg_idp->unused[0]); __put_user(0, &sg_idp->unused[1]); return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 
1 : 0; return 0; case SG_GET_PACK_ID: if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(srp->header.pack_id, ip); return 0; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(-1, ip); return 0; case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); if (val != sfp->reserve.bufflen) { if (sg_res_in_use(sfp) || sfp->mmap_called) return -EBUSY; sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, max_sectors_bytes(sdp->device->request_queue)); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 
1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) return -EFAULT; else { sg_req_info_t *rinfo; unsigned int ms; rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; ++val, srp = srp ? srp->nextrp : srp) { memset(&rinfo[val], 0, SZ_SG_REQ_INFO); if (srp) { rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; if (read_only) { unsigned char opcode = WRITE_6; Scsi_Ioctl_Command __user *siocp = p; if (copy_from_user(&opcode, siocp->data, 1)) return -EFAULT; if (sg_allow_access(filp, &opcode)) return -EPERM; } return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, (char *)arg); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: case SG_SCSI_RESET: if (atomic_read(&sdp->detaching)) return -ENODEV; break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ break; } result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in, filp->f_flags & O_NDELAY); if (result) return result; return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { Sg_device *sdp; Sg_fd *sfp; struct scsi_device *sdev; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; sdev = sdp->device; if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } return -ENOIOCTLCMD; } #endif static 
unsigned int sg_poll(struct file *filp, poll_table * wait) { unsigned int res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return POLLERR; sdp = sfp->parentdp; if (!sdp) return POLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = POLLIN | POLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) res |= POLLHUP; else if (!sfp->cmd_q) { if (0 == count) res |= POLLOUT | POLLWRNORM; } else if (count < SG_MAX_QUEUE) res |= POLLOUT | POLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (int) res)); return res; } static int sg_fasync(int fd, struct file *filp, int mode) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_fasync: mode=%d\n", mode)); return fasync_helper(fd, filp, mode, &sfp->async_qp); } static int sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { Sg_fd *sfp; unsigned long offset, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) return VM_FAULT_SIGBUS; rsv_schp = &sfp->reserve; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rsv_schp->bufflen) return VM_FAULT_SIGBUS; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_vma_fault: offset=%lu, scatg=%d\n", offset, rsv_schp->k_use_sg)); sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? 
len : length; if (offset < len) { struct page *page = nth_page(rsv_schp->pages[k], offset >> PAGE_SHIFT); get_page(page); /* increment page count */ vmf->page = page; return 0; /* success */ } sa += len; offset -= len; } return VM_FAULT_SIGBUS; } static const struct vm_operations_struct sg_mmap_vm_ops = { .fault = sg_vma_fault, }; static int sg_mmap(struct file *filp, struct vm_area_struct *vma) { Sg_fd *sfp; unsigned long req_sz, len, sa; Sg_scatter_hold *rsv_schp; int k, length; if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) return -ENXIO; req_sz = vma->vm_end - vma->vm_start; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, "sg_mmap starting, vm_start=%p, len=%d\n", (void *) vma->vm_start, (int) req_sz)); if (vma->vm_pgoff) return -EINVAL; /* want no offset */ rsv_schp = &sfp->reserve; if (req_sz > rsv_schp->bufflen) return -ENOMEM; /* cannot map more than reserved buffer */ sa = vma->vm_start; length = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { len = vma->vm_end - sa; len = (len < length) ? len : length; sa += len; } sfp->mmap_called = 1; vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; } static void sg_rq_end_io_usercontext(struct work_struct *work) { struct sg_request *srp = container_of(work, struct sg_request, ew.work); struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); kref_put(&sfp->f_ref, sg_remove_sfp); } /* * This function is a "bottom half" handler that is called by the mid * level when a command is completed (or has failed). 
*/ static void sg_rq_end_io(struct request *rq, int uptodate) { struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return; sdp = sfp->parentdp; if (unlikely(atomic_read(&sdp->detaching))) pr_info("%s: device detaching\n", __func__); sense = rq->sense; result = rq->errors; resid = rq->resid_len; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_cmd_done: pack_id=%d, res=0x%x\n", srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = status_byte(result); srp->header.msg_status = msg_byte(result); srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } /* Rely on write phase to clean out srp status values, so no "else" */ /* * Free the request as soon as it is complete so that its resources * can be reused without waiting for userspace to read() the * result. But keep the associated bio (if any) around until * blk_rq_unmap_user() can be called from user context. 
*/ srp->rq = NULL; if (rq->cmd != rq->__cmd) kfree(rq->cmd); __blk_put_request(rq->q, rq); write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. */ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sg_compat_ioctl, #endif .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, .llseek = no_llseek, }; static struct class *sg_sysfs_class; static int sg_sysfs_valid = 0; static Sg_device * sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " "failure\n", __func__); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { sdev_printk(KERN_WARNING, scsidp, "%s: idr " "allocation Sg_device failure: %d\n", __func__, error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, "sg_alloc: dev=%d \n", k)); sprintf(disk->disk_name, "sg%d", k); disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; 
mutex_init(&sdp->open_rel_lock); INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->open_wait); atomic_set(&sdp->detaching, 0); rwlock_init(&sdp->sfd_lock); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); struct gendisk *disk; Sg_device *sdp = NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; disk = alloc_disk(1); if (!disk) { pr_warn("%s: alloc_disk failed\n", __func__); return -ENOMEM; } disk->major = SCSI_GENERIC_MAJOR; error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { pr_warn("%s: cdev_alloc failed\n", __func__); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(disk, scsidp); if (IS_ERR(sdp)) { pr_warn("%s: sg_alloc failed\n", __func__); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if (error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", disk->disk_name); if (IS_ERR(sg_class_member)) { pr_err("%s: device_create failed\n", __func__); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) pr_err("%s: unable to make symlink 'generic' back " "to sg%d\n", __func__, sdp->index); } else pr_warn("%s: sg_sys Invalid\n", __func__); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " "type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); 
write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: put_disk(disk); if (cdev) cdev_del(cdev); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. */ write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); put_disk(sdp->disk); kfree(sdp); } static void sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; int val; if (!sdp) return; /* want sdp->detaching non-zero as soon as possible */ val = atomic_inc_return(&sdp->detaching); if (val > 1) return; /* only want to do following once per device */ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "%s\n", __func__)); read_lock_irqsave(&sdp->sfd_lock, iflags); list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible_all(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } wake_up_interruptible_all(&sdp->open_wait); read_unlock_irqrestore(&sdp->sfd_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; kref_put(&sdp->d_ref, sg_device_destroy); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); 
MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); if ( IS_ERR(sg_sysfs_class) ) { rc = PTR_ERR(sg_sysfs_class); goto err_out; } sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ return 0; } class_destroy(sg_sysfs_class); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_cleanup(); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_destroy(sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? 
WRITE : READ; unsigned char *long_cmdp = NULL; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_start_req: dxfer_len=%d\n", dxfer_len)); if (hp->cmd_len > BLK_MAX_CDB) { long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); if (!long_cmdp) return -ENOMEM; } /* * NOTE * * With scsi-mq enabled, there are a fixed number of preallocated * requests equal in number to shost->can_queue. If all of the * preallocated requests are already in use, then using GFP_ATOMIC with * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL * will cause blk_get_request() to sleep until an active command * completes, freeing up a request. Neither option is ideal, but * GFP_KERNEL is the better choice to prevent userspace from getting an * unexpected EWOULDBLOCK. * * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually * does not sleep except under memory pressure. */ rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); } blk_rq_set_block_pc(rq); if (hp->cmd_len > BLK_MAX_CDB) rq->cmd = long_cmdp; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 
0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } if (iov_count) { struct iovec *iov = NULL; struct iov_iter i; res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i); if (res < 0) return res; iov_iter_truncate(&i, hp->dxfer_len); res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; } static int sg_finish_rem_req(Sg_request *srp) { int ret = 0; Sg_fd *sfp = srp->parentfp; Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); if (srp->bio) ret = blk_rq_unmap_user(srp->bio); if (srp->rq) { if (srp->rq->cmd != srp->rq->__cmd) kfree(srp->rq->cmd); blk_put_request(srp->rq); } if (srp->res_used) sg_unlink_reserve(sfp, srp); else sg_remove_scat(sfp, req_schp); sg_remove_request(sfp, srp); return ret; } static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) { int sg_bufflen = tablesize * sizeof(struct page *); gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; schp->pages = kzalloc(sg_bufflen, gfp_flags); if (!schp->pages) return -ENOMEM; schp->sglist_len = sg_bufflen; return tablesize; /* number of scat_gath elements allocated */ } static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) { int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; int sg_tablesize = sfp->parentdp->sg_tablesize; int blk_size = buff_size, order; gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; if (blk_size < 0) return -EFAULT; if (0 == blk_size) ++blk_size; /* don't know why */ /* round request up to next highest SG_SECTOR_SZ byte boundary */ blk_size = ALIGN(blk_size, SG_SECTOR_SZ); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: buff_size=%d, blk_size=%d\n", buff_size, 
blk_size)); /* N.B. ret_sz carried into this block ... */ mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); if (mx_sc_elems < 0) return mx_sc_elems; /* most likely -ENOMEM */ num = scatter_elem_sz; if (unlikely(num != scatter_elem_sz_prev)) { if (num < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = PAGE_SIZE; } else scatter_elem_sz_prev = num; } if (sfp->low_dma) gfp_mask |= GFP_DMA; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) gfp_mask |= __GFP_ZERO; order = get_order(num); retry: ret_sz = 1 << (PAGE_SHIFT + order); for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; k++, rem_sz -= ret_sz) { num = (rem_sz > scatter_elem_sz_prev) ? scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int 
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, "sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (__copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (__copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_build_reserve: req_size=%d\n", req_size)); do { if (req_size < PAGE_SIZE) req_size = PAGE_SIZE; if (0 == sg_build_indirect(schp, sfp, req_size)) return; else sg_remove_scat(sfp, schp); req_size >>= 1; /* divide by 2 */ } while (req_size > (PAGE_SIZE / 2)); } static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) { Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; int k, num, rem; srp->res_used = 1; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: size=%d\n", size)); rem = size; num = 1 << (PAGE_SHIFT + rsv_schp->page_order); for (k = 0; k < rsv_schp->k_use_sg; k++) { if (rem <= num) { req_schp->k_use_sg = k + 1; req_schp->sglist_len = rsv_schp->sglist_len; req_schp->pages = rsv_schp->pages; req_schp->bufflen = size; req_schp->page_order = rsv_schp->page_order; break; } else rem -= num; } if (k >= rsv_schp->k_use_sg) SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_link_reserve: BAD size\n")); } static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) { Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, 
"sg_unlink_reserve: req->k_use_sg=%d\n", (int) req_schp->k_use_sg)); req_schp->k_use_sg = 0; req_schp->bufflen = 0; req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; sfp->save_scat_len = 0; srp->res_used = 0; } static Sg_request * sg_get_rq_mark(Sg_fd * sfp, int pack_id) { Sg_request *resp; unsigned long iflags; write_lock_irqsave(&sfp->rq_list_lock, iflags); for (resp = sfp->headrp; resp; resp = resp->nextrp) { /* look for requests that are ready + not SG_IO owned */ if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ break; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* always adds to end of list */ static Sg_request * sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; Sg_request *resp; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); resp = sfp->headrp; if (!resp) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; resp = rp; sfp->headrp = resp; } else { if (0 == sfp->cmd_q) resp = NULL; /* command queuing disallowed */ else { for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k < SG_MAX_QUEUE) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; while (resp->nextrp) resp = resp->nextrp; resp->nextrp = rp; resp = rp; } else resp = NULL; } } if (resp) { resp->nextrp = NULL; resp->header.duration = jiffies_to_msecs(jiffies); } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { Sg_request *prev_rp; Sg_request *rp; unsigned long iflags; int res = 0; if ((!sfp) || (!srp) || (!sfp->headrp)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); prev_rp = sfp->headrp; if (srp == prev_rp) { sfp->headrp = prev_rp->nextrp; prev_rp->parentfp = NULL; res = 1; } else { while ((rp = prev_rp->nextrp)) { if (srp == rp) { 
prev_rp->nextrp = rp->nextrp; rp->parentfp = NULL; res = 1; break; } prev_rp = rp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return ERR_PTR(-ENOMEM); init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); kref_init(&sfp->f_ref); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sdp->sfd_lock, iflags); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, max_sectors_bytes(sdp->device->request_queue)); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; /* Cleanup any responses which were never read(). 
 */
	/* NOTE(review): headrp is walked here without rq_list_lock; this
	 * relies on the workqueue context being the last user of sfp —
	 * TODO confirm no reader can still race with this teardown. */
	while (sfp->headrp)
		sg_finish_rem_req(sfp->headrp);

	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
				"sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
				(int) sfp->reserve.bufflen,
				(int) sfp->reserve.k_use_sg));
		/* release the per-fd reserve buffer pages */
		sg_remove_scat(sfp, &sfp->reserve);
	}

	SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
			"sg_remove_sfp: sfp=0x%p\n", sfp));
	kfree(sfp);

	/* drop the device references taken when this fd was created */
	scsi_device_put(sdp->device);
	kref_put(&sdp->d_ref, sg_device_destroy);
	module_put(THIS_MODULE);
}

/*
 * kref release for an Sg_fd: unlink it from the parent device's fd list
 * and defer the actual cleanup (which may sleep) to process context via
 * the workqueue.
 */
static void sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sdp->sfd_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sdp->sfd_lock, iflags);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}

/*
 * Return 1 if any request on this fd currently uses the reserve buffer,
 * 0 otherwise.  Takes rq_list_lock for the list walk.
 */
static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}

#ifdef CONFIG_SCSI_PROC_FS
/* idr_for_each() callback: remember the largest device index seen. */
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

/* Highest sg device index in use, plus one (for /proc iteration bounds). */
static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif

/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

/*
 * Look up the Sg_device for minor 'dev' and take a reference on it.
 * Returns ERR_PTR(-ENXIO) if no such device, ERR_PTR(-ENODEV) if the
 * device is in the middle of being detached.
 */
static Sg_device *
sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (atomic_read(&sdp->detaching)) {
		/* If sdp->detaching, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
*/ sdp = ERR_PTR(-ENODEV); } else kref_get(&sdp->d_ref); read_unlock_irqrestore(&sg_index_lock, flags); return sdp; } #ifdef CONFIG_SCSI_PROC_FS static struct proc_dir_entry *sg_proc_sgp = NULL; static char sg_proc_sg_dirname[] = "scsi/sg"; static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations adio_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_adio, .read = seq_read, .llseek = seq_lseek, .write = sg_proc_write_adio, .release = single_release, }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations dressz_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_dressz, .read = seq_read, .llseek = seq_lseek, .write = sg_proc_write_dressz, .release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_single_open_version(struct inode *inode, struct file *file); static const struct file_operations version_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_version, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); static const struct file_operations devhdr_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_devhdr, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_dev(struct seq_file *s, void *v); static int sg_proc_open_dev(struct inode *inode, struct file *file); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t 
*pos); static void dev_seq_stop(struct seq_file *s, void *v); static const struct file_operations dev_fops = { .owner = THIS_MODULE, .open = sg_proc_open_dev, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_dev, }; static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static int sg_proc_open_devstrs(struct inode *inode, struct file *file); static const struct file_operations devstrs_fops = { .owner = THIS_MODULE, .open = sg_proc_open_devstrs, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_devstrs, }; static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static int sg_proc_open_debug(struct inode *inode, struct file *file); static const struct file_operations debug_fops = { .owner = THIS_MODULE, .open = sg_proc_open_debug, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_debug, }; struct sg_proc_leaf { const char * name; const struct file_operations * fops; }; static const struct sg_proc_leaf sg_proc_leaf_arr[] = { {"allow_dio", &adio_fops}, {"debug", &debug_fops}, {"def_reserved_size", &dressz_fops}, {"device_hdr", &devhdr_fops}, {"devices", &dev_fops}, {"device_strs", &devstrs_fops}, {"version", &version_fops} }; static int sg_proc_init(void) { int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); int k; sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); if (!sg_proc_sgp) return 1; for (k = 0; k < num_leaves; ++k) { const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k]; umode_t mask = leaf->fops->write ? 
S_IRUGO | S_IWUSR : S_IRUGO; proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); } return 0; } static void sg_proc_cleanup(void) { int k; int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); if (!sg_proc_sgp) return; for (k = 0; k < num_leaves; ++k) remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); remove_proc_entry(sg_proc_sg_dirname, NULL); } static int sg_proc_seq_show_int(struct seq_file *s, void *v) { seq_printf(s, "%d\n", *((int *)s->private)); return 0; } static int sg_proc_single_open_adio(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &num); if (err) return err; sg_allow_dio = num ? 1 : 0; return count; } static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long k = ULONG_MAX; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &k); if (err) return err; if (k <= 1048576) { /* limit "big buff" to 1 MB */ sg_big_buff = k; return count; } return -ERANGE; } static int sg_proc_seq_show_version(struct seq_file *s, void *v) { seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR, sg_version_date); return 0; } static int sg_proc_single_open_version(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_version, NULL); } static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) { seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); return 0; } static int sg_proc_single_open_devhdr(struct inode *inode, struct file 
*file) { return single_open(file, sg_proc_seq_show_devhdr, NULL); } struct sg_proc_deviter { loff_t index; size_t max; }; static void * dev_seq_start(struct seq_file *s, loff_t *pos) { struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); s->private = it; if (! it) return NULL; it->index = *pos; it->max = sg_last_dev(); if (it->index >= it->max) return NULL; return it; } static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct sg_proc_deviter * it = s->private; *pos = ++it->index; return (it->index < it->max) ? it : NULL; } static void dev_seq_stop(struct seq_file *s, void *v) { kfree(s->private); } static int sg_proc_open_dev(struct inode *inode, struct file *file) { return seq_open(file, &dev_seq_ops); } static int sg_proc_seq_show_dev(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if ((NULL == sdp) || (NULL == sdp->device) || (atomic_read(&sdp->detaching))) seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); else { scsidp = sdp->device; seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, (int) scsidp->type, 1, (int) scsidp->queue_depth, (int) atomic_read(&scsidp->device_busy), (int) scsi_device_online(scsidp)); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } static int sg_proc_open_devstrs(struct inode *inode, struct file *file) { return seq_open(file, &devstrs_seq_ops); } static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; struct scsi_device *scsidp; unsigned long iflags; read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; scsidp = sdp ? 
sdp->device : NULL; if (sdp && scsidp && (!atomic_read(&sdp->detaching))) seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", scsidp->vendor, scsidp->model, scsidp->rev); else seq_puts(s, "<no active device>\n"); read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; const char * cp; unsigned int ms; k = 0; list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { k++; read_lock(&fp->rq_list_lock); /* irqs already disabled */ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " "(res)sgat=%d low_dma=%d\n", k, jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, (int) fp->low_dma); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); for (m = 0, srp = fp->headrp; srp != NULL; ++m, srp = srp->nextrp) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 0 : 1; if (srp->res_used) { if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else cp = " rb>> "; } else { if (SG_INFO_DIRECT_IO_MASK & hp->info) cp = " dio>> "; else cp = " "; } seq_puts(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; seq_puts(s, srp->done ? ((1 == srp->done) ? "rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", srp->header.pack_id, blen); if (srp->done) seq_printf(s, " dur=%d", hp->duration); else { ms = jiffies_to_msecs(jiffies); seq_printf(s, " t_o/elap=%d/%d", (new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout)), (ms > hp->duration ? 
ms - hp->duration : 0)); } seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } if (0 == m) seq_puts(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } } static int sg_proc_open_debug(struct inode *inode, struct file *file) { return seq_open(file, &debug_seq_ops); } static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } #endif /* CONFIG_SCSI_PROC_FS */ module_init(init_sg); module_exit(exit_sg);
/*
 * Dataset artifact (not source code): the following two identifiers are
 * CrossVul corpus metadata separating the sg-driver chunk above from the
 * YARA arena.c chunk below.  Preserved here, commented out so they do not
 * appear as stray C tokens:
 *   ./CrossVul/dataset_final_sorted/CWE-416/c/bad_4797_1
 *   crossvul-cpp_data_bad_3348_0
 */
/* Copyright (c) 2013. The YARA Authors. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This module implements a structure I've called "arena". An arena is a data container composed of a set of pages. The arena grows automatically when needed by adding new pages to hold new data. Arenas can be saved and loaded from files. 
*/ #include <string.h> #include <assert.h> #include <stdlib.h> #include <stdarg.h> #include <stddef.h> #include <time.h> #include <yara/arena.h> #include <yara/mem.h> #include <yara/error.h> #include <yara/limits.h> #pragma pack(push) #pragma pack(1) typedef struct _ARENA_FILE_HEADER { char magic[4]; uint32_t size; uint32_t version; } ARENA_FILE_HEADER; #pragma pack(pop) #define free_space(page) \ ((page)->size - (page)->used) // // _yr_arena_new_page // // Creates a new arena page of a given size // // Args: // size_t size - Size of the page // // Returns: // A pointer to the newly created YR_ARENA_PAGE structure // YR_ARENA_PAGE* _yr_arena_new_page( size_t size) { YR_ARENA_PAGE* new_page; new_page = (YR_ARENA_PAGE*) yr_malloc(sizeof(YR_ARENA_PAGE)); if (new_page == NULL) return NULL; new_page->address = (uint8_t*) yr_malloc(size); if (new_page->address == NULL) { yr_free(new_page); return NULL; } new_page->size = size; new_page->used = 0; new_page->next = NULL; new_page->prev = NULL; new_page->reloc_list_head = NULL; new_page->reloc_list_tail = NULL; return new_page; } // // _yr_arena_page_for_address // // Returns the page within the arena where an address reside. // // Args: // YR_ARENA* arena - Pointer to the arena // void* address - Address to be located // // Returns: // A pointer the corresponding YR_ARENA_PAGE structure where the address // resides. // YR_ARENA_PAGE* _yr_arena_page_for_address( YR_ARENA* arena, void* address) { YR_ARENA_PAGE* page; // Most of the times this function is called with an address within // the current page, let's check the current page first to avoid // looping through the page list. 
page = arena->current_page; if (page != NULL && (uint8_t*) address >= page->address && (uint8_t*) address < page->address + page->used) return page; page = arena->page_list_head; while (page != NULL) { if ((uint8_t*) address >= page->address && (uint8_t*) address < page->address + page->used) return page; page = page->next; } return NULL; } // // _yr_arena_make_relocatable // // Tells the arena that certain addresses contains a relocatable pointer. // // Args: // YR_ARENA* arena - Pointer the arena // void* address - Base address // va_list offsets - List of offsets relative to base address // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int _yr_arena_make_relocatable( YR_ARENA* arena, void* base, va_list offsets) { YR_RELOC* reloc; YR_ARENA_PAGE* page; size_t offset; size_t base_offset; int result = ERROR_SUCCESS; page = _yr_arena_page_for_address(arena, base); assert(page != NULL); base_offset = (uint8_t*) base - page->address; offset = va_arg(offsets, size_t); while (offset != -1) { assert(page->used >= sizeof(int64_t)); assert(base_offset + offset <= page->used - sizeof(int64_t)); reloc = (YR_RELOC*) yr_malloc(sizeof(YR_RELOC)); if (reloc == NULL) return ERROR_INSUFFICIENT_MEMORY; reloc->offset = (uint32_t) (base_offset + offset); reloc->next = NULL; if (page->reloc_list_head == NULL) page->reloc_list_head = reloc; if (page->reloc_list_tail != NULL) page->reloc_list_tail->next = reloc; page->reloc_list_tail = reloc; offset = va_arg(offsets, size_t); } return result; } // // yr_arena_create // // Creates a new arena. // // Args: // size_t initial_size - Initial size // int flags - Flags // YR_ARENA** arena - Address where a pointer to the new arena will be // written to. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
// int yr_arena_create( size_t initial_size, int flags, YR_ARENA** arena) { YR_ARENA* new_arena; YR_ARENA_PAGE* new_page; *arena = NULL; new_arena = (YR_ARENA*) yr_malloc(sizeof(YR_ARENA)); if (new_arena == NULL) return ERROR_INSUFFICIENT_MEMORY; new_page = _yr_arena_new_page(initial_size); if (new_page == NULL) { yr_free(new_arena); return ERROR_INSUFFICIENT_MEMORY; } new_arena->page_list_head = new_page; new_arena->current_page = new_page; new_arena->flags = flags | ARENA_FLAGS_COALESCED; *arena = new_arena; return ERROR_SUCCESS; } // // yr_arena_destroy // // Destroys an arena releasing its resource. // // Args: // YR_ARENA* arena - Pointer to the arena. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // void yr_arena_destroy( YR_ARENA* arena) { YR_RELOC* reloc; YR_RELOC* next_reloc; YR_ARENA_PAGE* page; YR_ARENA_PAGE* next_page; if (arena == NULL) return; page = arena->page_list_head; while(page != NULL) { next_page = page->next; reloc = page->reloc_list_head; while (reloc != NULL) { next_reloc = reloc->next; yr_free(reloc); reloc = next_reloc; } yr_free(page->address); yr_free(page); page = next_page; } yr_free(arena); } // // yr_arena_base_address // // Returns the base address for the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // // Returns: // A pointer to the arena's data. NULL if the no data has been written to // the arena yet. // void* yr_arena_base_address( YR_ARENA* arena) { if (arena->page_list_head->used == 0) return NULL; return arena->page_list_head->address; } // // yr_arena_next_address // // Given an address and an offset, returns the address where // address + offset resides. The arena is a collection of non-contiguous // regions of memory (pages), if address is pointing at the end of a page, // address + offset could cross the page boundary and point at somewhere // within the next page, this function handles these situations. It works // also with negative offsets. 
// // Args: // YR_ARENA* arena - Pointer to the arena. // void* address - Base address. // int offset - Offset. // // Returns: // A pointer // void* yr_arena_next_address( YR_ARENA* arena, void* address, size_t offset) { YR_ARENA_PAGE* page; page = _yr_arena_page_for_address(arena, address); assert(page != NULL); if ((uint8_t*) address + offset >= page->address && (uint8_t*) address + offset < page->address + page->used) { return (uint8_t*) address + offset; } if (offset > 0) { offset -= page->address + page->used - (uint8_t*) address; page = page->next; while (page != NULL) { if (offset < page->used) return page->address + offset; offset -= page->used; page = page->next; } } else { offset += page->used; page = page->prev; while (page != NULL) { if (offset < page->used) return page->address + page->used + offset; offset += page->used; page = page->prev; } } return NULL; } // // yr_arena_coalesce // // Coalesce the arena into a single page. This is a required step before // saving the arena to a file. // // Args: // YR_ARENA* arena - Pointer to the arena. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_coalesce( YR_ARENA* arena) { YR_ARENA_PAGE* page; YR_ARENA_PAGE* big_page; YR_ARENA_PAGE* next_page; YR_RELOC* reloc; uint8_t** reloc_address; uint8_t* reloc_target; size_t total_size = 0; page = arena->page_list_head; while(page != NULL) { total_size += page->used; page = page->next; } // Create a new page that will contain the entire arena. big_page = _yr_arena_new_page(total_size); if (big_page == NULL) return ERROR_INSUFFICIENT_MEMORY; // Copy data from current pages to the big page and adjust relocs. 
page = arena->page_list_head;

while (page != NULL)
{
  // Remember where this page's data lands in the big page; the reloc
  // fix-up pass below depends on new_address being set for every page.
  page->new_address = big_page->address + big_page->used;
  memcpy(page->new_address, page->address, page->used);

  reloc = page->reloc_list_head;

  // Rebase this page's reloc offsets so they are relative to big_page.
  while(reloc != NULL)
  {
    reloc->offset += (uint32_t) big_page->used;
    reloc = reloc->next;
  }

  // Splice this page's reloc list onto the big page's list.
  if (big_page->reloc_list_head == NULL)
    big_page->reloc_list_head = page->reloc_list_head;

  if (big_page->reloc_list_tail != NULL)
    big_page->reloc_list_tail->next = page->reloc_list_head;

  if (page->reloc_list_tail != NULL)
    big_page->reloc_list_tail = page->reloc_list_tail;

  big_page->used += page->used;
  page = page->next;
}

// Relocate pointers: every registered slot still holds an address into
// one of the old pages; translate it to the equivalent big_page address.
reloc = big_page->reloc_list_head;

while (reloc != NULL)
{
  reloc_address = (uint8_t**) (big_page->address + reloc->offset);
  reloc_target = *reloc_address;

  if (reloc_target != NULL)
  {
    page = _yr_arena_page_for_address(arena, reloc_target);
    assert(page != NULL);
    *reloc_address = page->new_address + (reloc_target - page->address);
  }

  reloc = reloc->next;
}

// Release current pages. Note: only the page structs and buffers are freed
// here; their reloc nodes now belong to big_page's list.
page = arena->page_list_head;

while(page != NULL)
{
  next_page = page->next;
  yr_free(page->address);
  yr_free(page);
  page = next_page;
}

arena->page_list_head = big_page;
arena->current_page = big_page;
arena->flags |= ARENA_FLAGS_COALESCED;

return ERROR_SUCCESS;
}

//
// yr_arena_reserve_memory
//
// Ensures that the arena have enough contiguous memory for future allocations.
// if the available space in the current page is lower than "size", a new page
// is allocated.
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//    size_t size      - Size of the region to be reserved.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
// int yr_arena_reserve_memory( YR_ARENA* arena, size_t size) { YR_ARENA_PAGE* new_page; size_t new_page_size; uint8_t* new_page_address; if (size > free_space(arena->current_page)) { if (arena->flags & ARENA_FLAGS_FIXED_SIZE) return ERROR_INSUFFICIENT_MEMORY; // Requested space is bigger than current page's empty space, // lets calculate the size for a new page. new_page_size = arena->current_page->size * 2; while (new_page_size < size) new_page_size *= 2; if (arena->current_page->used == 0) { // Current page is not used at all, it can be reallocated. new_page_address = (uint8_t*) yr_realloc( arena->current_page->address, new_page_size); if (new_page_address == NULL) return ERROR_INSUFFICIENT_MEMORY; arena->current_page->address = new_page_address; arena->current_page->size = new_page_size; } else { new_page = _yr_arena_new_page(new_page_size); if (new_page == NULL) return ERROR_INSUFFICIENT_MEMORY; new_page->prev = arena->current_page; arena->current_page->next = new_page; arena->current_page = new_page; arena->flags &= ~ARENA_FLAGS_COALESCED; } } return ERROR_SUCCESS; } // // yr_arena_allocate_memory // // Allocates memory within the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // size_t size - Size of the region to be allocated. // void** allocated_memory - Address of a pointer to newly allocated // region. // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_allocate_memory( YR_ARENA* arena, size_t size, void** allocated_memory) { FAIL_ON_ERROR(yr_arena_reserve_memory(arena, size)); *allocated_memory = arena->current_page->address + \ arena->current_page->used; arena->current_page->used += size; return ERROR_SUCCESS; } // // yr_arena_allocate_struct // // Allocates a structure within the arena. This function is similar to // yr_arena_allocate_memory but additionally receives a variable-length // list of offsets within the structure where pointers reside. 
// This allows
// the arena to keep track of pointers that must be adjusted when memory
// is relocated. This is an example on how to invoke this function:
//
//  yr_arena_allocate_struct(
//        arena,
//        sizeof(MY_STRUCTURE),
//        (void**) &my_structure_ptr,
//        offsetof(MY_STRUCTURE, field_1),
//        offsetof(MY_STRUCTURE, field_2),
//        ..
//        offsetof(MY_STRUCTURE, field_N),
//        EOL);
//
// Args:
//    YR_ARENA* arena         - Pointer to the arena.
//    size_t size             - Size of the region to be allocated.
//    void** allocated_memory - Address of a pointer to newly allocated
//                              region.
//    ...                     - Variable number of offsets relative to the
//                              beginning of the struct. Offsets are of type
//                              size_t.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
//

int yr_arena_allocate_struct(
    YR_ARENA* arena,
    size_t size,
    void** allocated_memory,
    ...)
{
  int result;

  va_list offsets;
  va_start(offsets, allocated_memory);

  result = yr_arena_allocate_memory(arena, size, allocated_memory);

  // Register each pointer-holding offset of the new struct as relocatable.
  if (result == ERROR_SUCCESS)
    result = _yr_arena_make_relocatable(arena, *allocated_memory, offsets);

  va_end(offsets);

  // Zero the struct only after everything else succeeded.
  if (result == ERROR_SUCCESS)
    memset(*allocated_memory, 0, size);

  return result;
}

//
// yr_arena_make_relocatable
//
// Tells the arena that certain addresses contains a relocatable pointer.
// Public varargs wrapper around _yr_arena_make_relocatable.
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//    void* base       - Address within the arena.
//    ...              - Variable number of size_t arguments with offsets
//                       relative to base.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
//

int yr_arena_make_relocatable(
    YR_ARENA* arena,
    void* base,
    ...)
{
  int result;

  va_list offsets;
  va_start(offsets, base);

  result = _yr_arena_make_relocatable(arena, base, offsets);

  va_end(offsets);

  return result;
}

//
// yr_arena_write_data
//
// Writes data to the arena.
//
// Args:
//    YR_ARENA* arena  - Pointer to the arena.
//    void* data       - Pointer to data to be written.
//    size_t size      - Size of data.
// void** written_data - Address where a pointer to the written data will // be returned. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_write_data( YR_ARENA* arena, void* data, size_t size, void** written_data) { void* output; int result; if (size > free_space(arena->current_page)) { result = yr_arena_allocate_memory(arena, size, &output); if (result != ERROR_SUCCESS) return result; } else { output = arena->current_page->address + arena->current_page->used; arena->current_page->used += size; } memcpy(output, data, size); if (written_data != NULL) *written_data = output; return ERROR_SUCCESS; } // // yr_arena_write_string // // Writes string to the arena. // // Args: // YR_ARENA* arena - Pointer to the arena. // const char* string - Pointer to string to be written. // char** written_string - Address where a pointer to the written data will // be returned. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. // int yr_arena_write_string( YR_ARENA* arena, const char* string, char** written_string) { return yr_arena_write_data( arena, (void*) string, strlen(string) + 1, (void**) written_string); } // // yr_arena_append // // Appends source_arena to target_arena. This operation destroys source_arena, // after returning any pointer to source_arena is no longer valid. The data // from source_arena is guaranteed to be aligned to a 16 bytes boundary when // written to the source_arena // // Args: // YR_ARENA* target_arena - Pointer to target the arena. // YR_ARENA* source_arena - Pointer to source arena. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
//

int yr_arena_append(
    YR_ARENA* target_arena,
    YR_ARENA* source_arena)
{
  uint8_t padding_data[15];

  // Pad target's current page up to a 16-byte boundary so the appended
  // data starts aligned. used % 16 == 0 yields padding_size == 16, which
  // the check below treats as "no padding needed".
  size_t padding_size = 16 - target_arena->current_page->used % 16;

  if (padding_size < 16)
  {
    memset(&padding_data, 0xCC, padding_size);

    FAIL_ON_ERROR(yr_arena_write_data(
        target_arena,
        padding_data,
        padding_size,
        NULL));
  }

  // Splice source's page list onto target; target adopts the pages.
  // NOTE(review): ARENA_FLAGS_COALESCED is not cleared on target here even
  // though it now spans multiple pages — confirm callers re-coalesce.
  target_arena->current_page->next = source_arena->page_list_head;
  source_arena->page_list_head->prev = target_arena->current_page;
  target_arena->current_page = source_arena->current_page;

  // Only the arena struct is freed; its pages now belong to target_arena.
  yr_free(source_arena);

  return ERROR_SUCCESS;
}

//
// yr_arena_duplicate
//
// Duplicates the arena, making an exact copy. This function requires the
// arena to be coalesced.
//
// Args:
//    YR_ARENA* arena       - Pointer to the arena.
//    YR_ARENA** duplicated - Address where a pointer to the new arena arena
//                            will be returned.
//
// Returns:
//    ERROR_SUCCESS if succeed or the corresponding error code otherwise.
//

int yr_arena_duplicate(
    YR_ARENA* arena,
    YR_ARENA** duplicated)
{
  YR_RELOC* reloc;
  YR_RELOC* new_reloc;
  YR_ARENA_PAGE* page;
  YR_ARENA_PAGE* new_page;
  YR_ARENA* new_arena;

  uint8_t** reloc_address;
  uint8_t* reloc_target;

  // Only coalesced arenas can be duplicated.
  assert(arena->flags & ARENA_FLAGS_COALESCED);

  page = arena->page_list_head;

  FAIL_ON_ERROR(yr_arena_create(page->size, arena->flags, &new_arena));

  new_page = new_arena->current_page;
  new_page->used = page->used;

  // NOTE(review): copies page->size bytes, not page->used — the tail beyond
  // "used" may be uninitialized depending on how _yr_arena_new_page
  // allocates; confirm intended.
  memcpy(new_page->address, page->address, page->size);

  reloc = page->reloc_list_head;

  // Clone the reloc list and re-point every relocatable slot into the
  // new page's buffer.
  while (reloc != NULL)
  {
    new_reloc = (YR_RELOC*) yr_malloc(sizeof(YR_RELOC));

    if (new_reloc == NULL)
    {
      yr_arena_destroy(new_arena);
      return ERROR_INSUFFICIENT_MEMORY;
    }

    new_reloc->offset = reloc->offset;
    new_reloc->next = NULL;

    if (new_page->reloc_list_head == NULL)
      new_page->reloc_list_head = new_reloc;

    if (new_page->reloc_list_tail != NULL)
      new_page->reloc_list_tail->next = new_reloc;

    new_page->reloc_list_tail = new_reloc;

    reloc_address = (uint8_t**) (new_page->address + new_reloc->offset);
    reloc_target = *reloc_address;

    if (reloc_target != NULL)
    {
      assert(reloc_target >= page->address);
      assert(reloc_target < page->address + page->used);

      // Rebase: same relative position, new buffer.
      *reloc_address = reloc_target - \
                       page->address + \
                       new_page->address;
    }

    reloc = reloc->next;
  }

  *duplicated = new_arena;

  return ERROR_SUCCESS;
}

//
// yr_arena_load_stream
//
// Loads an arena from a stream.
//
// Args:
//    YR_STREAM* stream  - Pointer to stream object
//    YR_ARENA**         - Address where a pointer to the loaded arena
//                         will be returned
//
// Returns:
//    ERROR_SUCCESS if successful, appropriate error code otherwise.
// int yr_arena_load_stream( YR_STREAM* stream, YR_ARENA** arena) { YR_ARENA_PAGE* page; YR_ARENA* new_arena; ARENA_FILE_HEADER header; uint32_t reloc_offset; uint8_t** reloc_address; uint8_t* reloc_target; int result; if (yr_stream_read(&header, sizeof(header), 1, stream) != 1) return ERROR_INVALID_FILE; if (header.magic[0] != 'Y' || header.magic[1] != 'A' || header.magic[2] != 'R' || header.magic[3] != 'A') { return ERROR_INVALID_FILE; } if (header.size < 2048) // compiled rules are always larger than 2KB return ERROR_CORRUPT_FILE; if (header.version != ARENA_FILE_VERSION) return ERROR_UNSUPPORTED_FILE_VERSION; result = yr_arena_create(header.size, 0, &new_arena); if (result != ERROR_SUCCESS) return result; page = new_arena->current_page; if (yr_stream_read(page->address, header.size, 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } page->used = header.size; if (yr_stream_read(&reloc_offset, sizeof(reloc_offset), 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } while (reloc_offset != 0xFFFFFFFF) { if (reloc_offset > header.size - sizeof(uint8_t*)) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } yr_arena_make_relocatable(new_arena, page->address, reloc_offset, EOL); reloc_address = (uint8_t**) (page->address + reloc_offset); reloc_target = *reloc_address; if (reloc_target != (uint8_t*) (size_t) 0xFFFABADA) *reloc_address += (size_t) page->address; else *reloc_address = 0; if (yr_stream_read(&reloc_offset, sizeof(reloc_offset), 1, stream) != 1) { yr_arena_destroy(new_arena); return ERROR_CORRUPT_FILE; } } *arena = new_arena; return ERROR_SUCCESS; } // // yr_arena_save_stream // // Saves the arena into a stream. If the file exists its overwritten. This // function requires the arena to be coalesced. // // Args: // YR_ARENA* arena - Pointer to the arena. // YR_STREAM* stream - Pointer to stream object. // // Returns: // ERROR_SUCCESS if succeed or the corresponding error code otherwise. 
// int yr_arena_save_stream( YR_ARENA* arena, YR_STREAM* stream) { YR_ARENA_PAGE* page; YR_RELOC* reloc; ARENA_FILE_HEADER header; uint32_t end_marker = 0xFFFFFFFF; uint8_t** reloc_address; uint8_t* reloc_target; // Only coalesced arenas can be saved. assert(arena->flags & ARENA_FLAGS_COALESCED); page = arena->page_list_head; reloc = page->reloc_list_head; // Convert pointers to offsets before saving. while (reloc != NULL) { reloc_address = (uint8_t**) (page->address + reloc->offset); reloc_target = *reloc_address; if (reloc_target != NULL) { assert(reloc_target >= page->address); assert(reloc_target < page->address + page->used); *reloc_address = (uint8_t*) (*reloc_address - page->address); } else { *reloc_address = (uint8_t*) (size_t) 0xFFFABADA; } reloc = reloc->next; } assert(page->size < 0x80000000); // 2GB header.magic[0] = 'Y'; header.magic[1] = 'A'; header.magic[2] = 'R'; header.magic[3] = 'A'; header.size = (int32_t) page->size; header.version = ARENA_FILE_VERSION; yr_stream_write(&header, sizeof(header), 1, stream); yr_stream_write(page->address, header.size, 1, stream); reloc = page->reloc_list_head; // Convert offsets back to pointers. while (reloc != NULL) { yr_stream_write(&reloc->offset, sizeof(reloc->offset), 1, stream); reloc_address = (uint8_t**) (page->address + reloc->offset); reloc_target = *reloc_address; if (reloc_target != (void*) (size_t) 0xFFFABADA) *reloc_address += (size_t) page->address; else *reloc_address = 0; reloc = reloc->next; } yr_stream_write(&end_marker, sizeof(end_marker), 1, stream); return ERROR_SUCCESS; }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_3348_0
crossvul-cpp_data_bad_1057_0
/* * fs/cifs/smb2pdu.c * * Copyright (C) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 * * Contains the routines for constructing the SMB2 PDUs themselves * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */ /* Note that there are handle based routines which must be */ /* treated slightly differently for reconnection purposes since we never */ /* want to reuse a stale file handle and only the caller knows the file info */ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/vfs.h> #include <linux/task_io_accounting_ops.h> #include <linux/uaccess.h> #include <linux/uuid.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "smb2pdu.h" #include "cifsglob.h" #include "cifsacl.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "smb2status.h" #include "smb2glob.h" #include "cifspdu.h" #include "cifs_spnego.h" #include "smbdirect.h" #include "trace.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" #endif /* * The following table defines the expected "StructureSize" of SMB2 requests * in order by SMB2 command. 
This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */	9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */	49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */	48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

/*
 * Returns 1 if SMB3 transport encryption is required for this tcon:
 * either the session or the share demands it, or the mount asked for
 * sealing and the server advertises encryption capability.
 */
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
	if (!tcon)
		return 0;
	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
		return 1;
	if (tcon->seal &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
		return 1;
	return 0;
}

/*
 * Fill in the fixed SMB2 sync header for a request: protocol id, command,
 * credits, process id, tree/session ids and signing flag.
 */
static void
smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
		  const struct cifs_tcon *tcon)
{
	shdr->ProtocolId = SMB2_PROTO_NUMBER;
	shdr->StructureSize = cpu_to_le16(64);
	shdr->Command = smb2_cmd;
	if (tcon && tcon->ses && tcon->ses->server) {
		struct TCP_Server_Info *server = tcon->ses->server;

		spin_lock(&server->req_lock);
		/* Request up to 10 credits but don't go over the limit.
*/
		if (server->credits >= server->max_credits)
			shdr->CreditRequest = cpu_to_le16(0);
		else
			shdr->CreditRequest = cpu_to_le16(
				min_t(int, server->max_credits -
						server->credits, 10));
		spin_unlock(&server->req_lock);
	} else {
		shdr->CreditRequest = cpu_to_le16(2);
	}
	shdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if ((tcon->ses) && (tcon->ses->server) &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		shdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	shdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		shdr->SessionId = tcon->ses->Suid;

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to net set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (tcon->ses && tcon->ses->server && tcon->ses->server->sign &&
	    !smb3_encryption_required(tcon))
		shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	return;
}

#ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Re-establish the tree connection. For DFS mounts, walk the cached DFS
 * targets and try each target whose hostname matches the TCP connection;
 * update the cache's target hint on success.
 */
static int __smb2_reconnect(const struct nls_table *nlsc,
			    struct cifs_tcon *tcon)
{
	int rc;
	struct dfs_cache_tgt_list tl;
	struct dfs_cache_tgt_iterator *it = NULL;
	char *tree;
	const char *tcp_host;
	size_t tcp_host_len;
	const char *dfs_host;
	size_t dfs_host_len;

	tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
	if (!tree)
		return -ENOMEM;

	/* IPC$ and non-DFS shares reconnect directly. */
	if (tcon->ipc) {
		scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
			  tcon->ses->server->hostname);
		rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
		goto out;
	}

	if (!tcon->dfs_path) {
		rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
		goto out;
	}

	rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
	if (rc)
		goto out;

	extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
			     &tcp_host_len);

	for (it = dfs_cache_get_tgt_iterator(&tl); it;
	     it = dfs_cache_get_next_tgt(&tl, it)) {
		const char *tgt = dfs_cache_get_tgt_name(it);

		extract_unc_hostname(tgt, &dfs_host, &dfs_host_len);

		/* Only try targets living on the already-connected host. */
		if (dfs_host_len != tcp_host_len
		    || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
			cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
				 __func__,
				 (int)dfs_host_len, dfs_host,
				 (int)tcp_host_len, tcp_host);
			continue;
		}

		scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);

		rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
		if (!rc)
			break;
		if (rc == -EREMOTE)
			break;
	}

	if (!rc) {
		if (it)
			rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1,
							    it);
		else
			rc = -ENOENT;
	}
	dfs_cache_free_tgts(&tl);
out:
	kfree(tree);
	return rc;
}
#else
static inline int __smb2_reconnect(const struct nls_table *nlsc,
				   struct cifs_tcon *tcon)
{
	return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
}
#endif

/*
 * Called before sending an SMB2 command: if the transport or session needs
 * reconnecting, wait for the demultiplex thread, renegotiate, re-setup the
 * session and re-connect the tree. Returns -EAGAIN for handle-based
 * commands so the caller can reacquire its file handle.
 */
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	int retries;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return 0;

	if (smb2_command == SMB2_TREE_CONNECT)
		return 0;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		   (smb2_command != SMB2_CREATE) &&
		   (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	retries = server->nr_targets;

	/*
	 * Give demultiplex thread up to 10 seconds to each target available for
	 * reconnect -- should be greater than cifs socket timeout which is 7
	 * seconds.
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      10 * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to a received"
				 " signal by the process\n", __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		if (--retries)
			continue;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
		retries = server->nr_targets;
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return 0;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);

	/*
	 * Recheck after acquire mutex. If another thread is negotiating
	 * and the server never sends an answer the socket will be closed
	 * and tcpStatus set to reconnect.
	 */
	if (server->tcpStatus == CifsNeedReconnect) {
		rc = -EHOSTDOWN;
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	if (tcon->use_persistent)
		tcon->need_reopen_files = true;

	rc = __smb2_reconnect(nls_codepage, tcon);
	mutex_unlock(&tcon->ses->session_mutex);

	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc) {
		/* If sess reconnected but tcon didn't, something strange ... */
		printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
		goto out;
	}

	if (smb2_command != SMB2_INTERNAL_CMD)
		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);

	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		rc = -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Zero the buffer and fill in the SMB2 header plus the command-specific
 * fixed StructureSize2, returning the total fixed-length size.
 */
static void
fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
	       unsigned int *total_len)
{
	struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(buf, 0, 256);

	smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon);
	spdu->StructureSize2 = cpu_to_le16(parmsize);

	*total_len = parmsize + sizeof(struct smb2_sync_hdr);
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
		    void **request_buf, unsigned int *total_len)
{
	int rc;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	if (smb2_command == SMB2_SET_INFO)
		*request_buf = cifs_buf_get();
	else
		*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage?
*/
		return -ENOMEM;
	}

	fill_small_buf(smb2_command, tcon,
		       (struct smb2_sync_hdr *)(*request_buf),
		       total_len);

	if (tcon != NULL) {
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

/* SMB 3.1.1 negotiate context type identifiers (see MS-SMB2 2.2.3.1) */
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES	cpu_to_le16(1)
#define SMB2_ENCRYPTION_CAPABILITIES		cpu_to_le16(2)
#define SMB2_POSIX_EXTENSIONS_AVAILABLE		cpu_to_le16(0x100)

/* Build the preauth integrity context: SHA-512 with a random salt. */
static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

/* Build the encryption context advertising AES-128-CCM. */
static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
	pneg_ctxt->CipherCount = cpu_to_le16(1);
/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
	pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
}

/* Build the POSIX extensions context, identified by a fixed 16-byte GUID. */
static void
build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
	pneg_ctxt->Name[0] = 0x93;
	pneg_ctxt->Name[1] = 0xAD;
	pneg_ctxt->Name[2] = 0x25;
	pneg_ctxt->Name[3] = 0x50;
	pneg_ctxt->Name[4] = 0x9C;
	pneg_ctxt->Name[5] = 0xB4;
	pneg_ctxt->Name[6] = 0x11;
	pneg_ctxt->Name[7] = 0xE7;
	pneg_ctxt->Name[8] = 0xB4;
	pneg_ctxt->Name[9] = 0x23;
	pneg_ctxt->Name[10] = 0x83;
	pneg_ctxt->Name[11] = 0xDE;
	pneg_ctxt->Name[12] = 0x96;
	pneg_ctxt->Name[13] = 0x8B;
	pneg_ctxt->Name[14] = 0xCD;
	pneg_ctxt->Name[15] = 0x7C;
}

/*
 * Append the three SMB 3.1.1 negotiate contexts (preauth, encryption,
 * POSIX) after the fixed part of the negotiate request, 8-byte aligned.
 */
static void
assemble_neg_contexts(struct
smb2_negotiate_req *req, unsigned int *total_len) { char *pneg_ctxt = (char *)req; unsigned int ctxt_len; if (*total_len > 200) { /* In case length corrupted don't want to overrun smb buffer */ cifs_dbg(VFS, "Bad frame length assembling neg contexts\n"); return; } /* * round up total_len of fixed part of SMB3 negotiate request to 8 * byte boundary before adding negotiate contexts */ *total_len = roundup(*total_len, 8); pneg_ctxt = (*total_len) + (char *)req; req->NegotiateContextOffset = cpu_to_le32(*total_len); build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8; *total_len += ctxt_len; pneg_ctxt += ctxt_len; build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8; *total_len += ctxt_len; pneg_ctxt += ctxt_len; build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); *total_len += sizeof(struct smb2_posix_neg_context); req->NegotiateContextCount = cpu_to_le16(3); } static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt) { unsigned int len = le16_to_cpu(ctxt->DataLength); /* If invalid preauth context warn but use what we requested, SHA-512 */ if (len < MIN_PREAUTH_CTXT_DATA_LEN) { printk_once(KERN_WARNING "server sent bad preauth context\n"); return; } if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1) printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n"); if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n"); } static int decode_encrypt_ctx(struct TCP_Server_Info *server, struct smb2_encryption_neg_context *ctxt) { unsigned int len = le16_to_cpu(ctxt->DataLength); cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len); if (len < MIN_ENCRYPT_CTXT_DATA_LEN) { printk_once(KERN_WARNING "server sent bad crypto ctxt len\n"); return -EINVAL; } if (le16_to_cpu(ctxt->CipherCount) != 1) { 
printk_once(KERN_WARNING "illegal SMB3.11 cipher count\n"); return -EINVAL; } cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0])); if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) && (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) { printk_once(KERN_WARNING "invalid SMB3.11 cipher returned\n"); return -EINVAL; } server->cipher_type = ctxt->Ciphers[0]; server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; return 0; } static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp, struct TCP_Server_Info *server, unsigned int len_of_smb) { struct smb2_neg_context *pctx; unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset); unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount); unsigned int len_of_ctxts, i; int rc = 0; cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt); if (len_of_smb <= offset) { cifs_dbg(VFS, "Invalid response: negotiate context offset\n"); return -EINVAL; } len_of_ctxts = len_of_smb - offset; for (i = 0; i < ctxt_cnt; i++) { int clen; /* check that offset is not beyond end of SMB */ if (len_of_ctxts == 0) break; if (len_of_ctxts < sizeof(struct smb2_neg_context)) break; pctx = (struct smb2_neg_context *)(offset + (char *)rsp); clen = le16_to_cpu(pctx->DataLength); if (clen > len_of_ctxts) break; if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) decode_preauth_context( (struct smb2_preauth_neg_context *)pctx); else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) rc = decode_encrypt_ctx(server, (struct smb2_encryption_neg_context *)pctx); else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE) server->posix_ext_supported = true; else cifs_dbg(VFS, "unknown negcontext of type %d ignored\n", le16_to_cpu(pctx->ContextType)); if (rc) break; /* offsets must be 8 byte aligned */ clen = (clen + 7) & ~0x7; offset += clen + sizeof(struct smb2_neg_context); len_of_ctxts -= clen; } return rc; } static struct create_posix * create_posix_buf(umode_t mode) { struct create_posix 
*buf; buf = kzalloc(sizeof(struct create_posix), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct create_posix, Mode)); buf->ccontext.DataLength = cpu_to_le32(4); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct create_posix, Name)); buf->ccontext.NameLength = cpu_to_le16(16); /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */ buf->Name[0] = 0x93; buf->Name[1] = 0xAD; buf->Name[2] = 0x25; buf->Name[3] = 0x50; buf->Name[4] = 0x9C; buf->Name[5] = 0xB4; buf->Name[6] = 0x11; buf->Name[7] = 0xE7; buf->Name[8] = 0xB4; buf->Name[9] = 0x23; buf->Name[10] = 0x83; buf->Name[11] = 0xDE; buf->Name[12] = 0x96; buf->Name[13] = 0x8B; buf->Name[14] = 0xCD; buf->Name[15] = 0x7C; buf->Mode = cpu_to_le32(mode); cifs_dbg(FYI, "mode on posix create 0%o", mode); return buf; } static int add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = create_posix_buf(mode); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = sizeof(struct create_posix); if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) + iov[num - 1].iov_len); le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix)); *num_iovec = num + 1; return 0; } /* * * SMB2 Worker functions follow: * * The general structure of the worker functions is: * 1) Call smb2_init (assembles SMB2 header) * 2) Initialize SMB2 command specific fields in fixed length area of SMB * 3) Call smb_sendrcv2 (sends request on socket and waits for response) * 4) Decode SMB2 command specific fields in the fixed length area * 5) Decode variable length data area (if any for this SMB2 command type) * 6) Call free smb buffer * 7) return * */ int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) { struct smb_rqst rqst; struct smb2_negotiate_req *req; struct smb2_negotiate_rsp *rsp; struct kvec 
iov[1]; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct TCP_Server_Info *server = ses->server; int blob_offset, blob_length; char *security_blob; int flags = CIFS_NEG_OP; unsigned int total_len; cifs_dbg(FYI, "Negotiate protocol\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len); if (rc) return rc; req->sync_hdr.SessionId = 0; memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE); if (strcmp(ses->server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); req->DialectCount = cpu_to_le16(2); total_len += 4; } else if (strcmp(ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); req->DialectCount = cpu_to_le16(4); total_len += 8; } else { /* otherwise send specific dialect */ req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); req->DialectCount = cpu_to_le16(1); total_len += 2; } /* only one of SMB2 signing flags may be set in SMB2 request */ if (ses->sign) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else req->SecurityMode = 0; req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities); /* ClientGUID must be zero for SMB2.02 dialect */ if (ses->server->vals->protocol_id == SMB20_PROT_ID) memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); else { memcpy(req->ClientGUID, server->client_guid, SMB2_CLIENT_GUID_SIZE); if ((ses->server->vals->protocol_id == SMB311_PROT_ID) || (strcmp(ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0)) 
assemble_neg_contexts(req, &total_len); } iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ if (rc == -EOPNOTSUPP) { cifs_dbg(VFS, "Dialect not supported by server. Consider " "specifying vers=1.0 or vers=2.0 on mount for accessing" " older servers\n"); goto neg_exit; } else if (rc != 0) goto neg_exit; if (strcmp(ses->server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { cifs_dbg(VFS, "SMB2 dialect returned but not requested\n"); return -EIO; } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { cifs_dbg(VFS, "SMB2.1 dialect returned but not requested\n"); return -EIO; } } else if (strcmp(ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { cifs_dbg(VFS, "SMB2 dialect returned but not requested\n"); return -EIO; } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { /* ops set to 3.0 by default for default so update */ ses->server->ops = &smb21_operations; } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) ses->server->ops = &smb311_operations; } else if (le16_to_cpu(rsp->DialectRevision) != ses->server->vals->protocol_id) { /* if requested single dialect ensure returned dialect matched */ cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n", le16_to_cpu(rsp->DialectRevision)); return -EIO; } cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) cifs_dbg(FYI, "negotiated smb2.1 dialect\n"); else if 
(rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.0 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.02 dialect\n"); else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n"); else { cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n", le16_to_cpu(rsp->DialectRevision)); rc = -EIO; goto neg_exit; } server->dialect = le16_to_cpu(rsp->DialectRevision); /* * Keep a copy of the hash after negprot. This hash will be * the starting hash value for all sessions made from this * server. */ memcpy(server->preauth_sha_hash, ses->preauth_sha_hash, SMB2_PREAUTH_HASH_SIZE); /* SMB2 only has an extended negflavor */ server->negflavor = CIFS_NEGFLAVOR_EXTENDED; /* set it to the maximum buffer size value we can send with 1 credit */ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), SMB2_MAX_BUFFER_SIZE); server->max_read = le32_to_cpu(rsp->MaxReadSize); server->max_write = le32_to_cpu(rsp->MaxWriteSize); server->sec_mode = le16_to_cpu(rsp->SecurityMode); if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode) cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n", server->sec_mode); server->capabilities = le32_to_cpu(rsp->Capabilities); /* Internal types */ server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES; security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, (struct smb2_sync_hdr *)rsp); /* * See MS-SMB2 section 2.2.4: if no blob, client picks default which * for us will be * ses->sectype = RawNTLMSSP; * but for time being this is our only auth choice so doesn't matter. * We just found a server which sets blob length to zero expecting raw. 
*/ if (blob_length == 0) { cifs_dbg(FYI, "missing security blob on negprot\n"); server->sec_ntlmssp = true; } rc = cifs_enable_signing(server, ses->sign); if (rc) goto neg_exit; if (blob_length) { rc = decode_negTokenInit(security_blob, blob_length, server); if (rc == 1) rc = 0; else if (rc == 0) rc = -EIO; } if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) { if (rsp->NegotiateContextCount) rc = smb311_decode_neg_context(rsp, server, rsp_iov.iov_len); else cifs_dbg(VFS, "Missing expected negotiate contexts\n"); } neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) { int rc; struct validate_negotiate_info_req *pneg_inbuf; struct validate_negotiate_info_rsp *pneg_rsp = NULL; u32 rsplen; u32 inbuflen; /* max of 4 dialects */ cifs_dbg(FYI, "validate negotiate\n"); /* In SMB3.11 preauth integrity supersedes validate negotiate */ if (tcon->ses->server->dialect == SMB311_PROT_ID) return 0; /* * validation ioctl must be signed, so no point sending this if we * can not sign it (ie are not known user). Even if signing is not * required (enabled but not negotiated), in those cases we selectively * sign just this, the first and only signed request on a connection. * Having validation of negotiate info helps reduce attack vectors. 
*/ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) return 0; /* validation requires signing */ if (tcon->ses->user_name == NULL) { cifs_dbg(FYI, "Can't validate negotiate: null user mount\n"); return 0; /* validation requires signing */ } if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS); if (!pneg_inbuf) return -ENOMEM; pneg_inbuf->Capabilities = cpu_to_le32(tcon->ses->server->vals->req_capabilities); memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid, SMB2_CLIENT_GUID_SIZE); if (tcon->ses->sign) pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); else if (global_secflags & CIFSSEC_MAY_SIGN) pneg_inbuf->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); else pneg_inbuf->SecurityMode = 0; if (strcmp(tcon->ses->server->vals->version_string, SMB3ANY_VERSION_STRING) == 0) { pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); pneg_inbuf->DialectCount = cpu_to_le16(2); /* structure is big enough for 3 dialects, sending only 2 */ inbuflen = sizeof(*pneg_inbuf) - (2 * sizeof(pneg_inbuf->Dialects[0])); } else if (strcmp(tcon->ses->server->vals->version_string, SMBDEFAULT_VERSION_STRING) == 0) { pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID); pneg_inbuf->DialectCount = cpu_to_le16(4); /* structure is big enough for 3 dialects */ inbuflen = sizeof(*pneg_inbuf); } else { /* otherwise specific dialect was requested */ pneg_inbuf->Dialects[0] = cpu_to_le16(tcon->ses->server->vals->protocol_id); pneg_inbuf->DialectCount = cpu_to_le16(1); /* structure is big enough for 3 dialects, sending only 1 */ inbuflen = sizeof(*pneg_inbuf) - sizeof(pneg_inbuf->Dialects[0]) * 2; } rc = 
SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { /* * Old Windows versions or Netapp SMB server can return * not supported error. Client should accept it. */ cifs_dbg(VFS, "Server does not support validate negotiate\n"); return 0; } else if (rc != 0) { cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); rc = -EIO; goto out_free_inbuf; } rc = -EIO; if (rsplen != sizeof(*pneg_rsp)) { cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n", rsplen); /* relax check since Mac returns max bufsize allowed on ioctl */ if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) goto out_free_rsp; } /* check validate negotiate info response matches what we got earlier */ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect)) goto vneg_out; if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) goto vneg_out; /* do not validate server guid because not saved at negprot time yet */ if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND | SMB2_LARGE_FILES) != tcon->ses->server->capabilities) goto vneg_out; /* validate negotiate successful */ rc = 0; cifs_dbg(FYI, "validate negotiate info successful\n"); goto out_free_rsp; vneg_out: cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); out_free_rsp: kfree(pneg_rsp); out_free_inbuf: kfree(pneg_inbuf); return rc; } enum securityEnum smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (requested) { case Kerberos: case RawNTLMSSP: return requested; case NTLMv2: return RawNTLMSSP; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; /* Fallthrough */ default: return Unspecified; } } struct SMB2_sess_data { unsigned int xid; 
struct cifs_ses *ses; struct nls_table *nls_cp; void (*func)(struct SMB2_sess_data *); int result; u64 previous_session; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[2]; }; static int SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct TCP_Server_Info *server = ses->server; unsigned int total_len; rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req, &total_len); if (rc) return rc; /* First session, not a reauthenticate */ req->sync_hdr.SessionId = 0; /* if reconnect, we need to send previous sess id, otherwise it is 0 */ req->PreviousSessionId = sess_data->previous_session; req->Flags = 0; /* MBZ */ /* enough to enable echos and oplocks and one max size write */ req->sync_hdr.CreditRequest = cpu_to_le16(130); /* only one of SMB2 signing flags may be set in SMB2 request */ if (server->sign) req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */ req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; else req->SecurityMode = 0; req->Capabilities = 0; req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; /* 1 for pad */ sess_data->iov[0].iov_len = total_len - 1; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. 
*/ sess_data->buf0_type = CIFS_SMALL_BUFFER; return 0; } static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } static int SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) { int rc; struct smb_rqst rqst; struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; struct kvec rsp_iov = { NULL, 0 }; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->SecurityBufferOffset = cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */); req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = sess_data->iov; rqst.rq_nvec = 2; /* BB add code to build os and lm fields */ rc = cifs_send_recv(sess_data->xid, sess_data->ses, &rqst, &sess_data->buf0_type, CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; } static int SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); return rc; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); return rc; } #ifdef CONFIG_CIFS_UPCALL static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct cifs_spnego_msg *msg; struct key *spnego_key = NULL; struct smb2_sess_setup_rsp *rsp = NULL; rc = 
SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data[0]; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); rc = SMB2_sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; SMB2_sess_free_buffer(sess_data); } #else static void SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) { cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); sess_data->result = -EOPNOTSUPP; sess_data->func = NULL; } #endif static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data); static void SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_rsp *rsp = NULL; char *ntlmssp_blob = NULL; bool use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; /* * If memory allocation is successful, caller of this function * frees it. 
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out_err; } ses->ntlmssp->sesskey_per_smbsess = true; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out_err; ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE), GFP_KERNEL); if (ntlmssp_blob == NULL) { rc = -ENOMEM; goto out; } build_ntlmssp_negotiate_blob(ntlmssp_blob, ses); if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } else { blob_length = sizeof(struct _NEGOTIATE_MESSAGE); /* with raw NTLMSSP we don't encapsulate in SPNEGO */ } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && rsp->sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) rc = 0; if (rc) goto out; if (offsetof(struct smb2_sess_setup_rsp, Buffer) != le16_to_cpu(rsp->SecurityBufferOffset)) { cifs_dbg(VFS, "Invalid security buffer offset %d\n", le16_to_cpu(rsp->SecurityBufferOffset)); rc = -EIO; goto out; } rc = decode_ntlmssp_challenge(rsp->Buffer, le16_to_cpu(rsp->SecurityBufferLength), ses); if (rc) goto out; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); ses->Suid = rsp->sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate; return; } out_err: kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static void SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb2_sess_setup_req *req; struct smb2_sess_setup_rsp *rsp = NULL; unsigned char *ntlmssp_blob = NULL; bool 
use_spnego = false; /* else use raw ntlmssp */ u16 blob_length = 0; rc = SMB2_sess_alloc_buffer(sess_data); if (rc) goto out; req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; req->sync_hdr.SessionId = ses->Suid; rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, sess_data->nls_cp); if (rc) { cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); goto out; } if (use_spnego) { /* BB eventually need to add this */ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n"); rc = -EOPNOTSUPP; goto out; } sess_data->iov[1].iov_base = ntlmssp_blob; sess_data->iov[1].iov_len = blob_length; rc = SMB2_sess_sendreceive(sess_data); if (rc) goto out; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; ses->Suid = rsp->sync_hdr.SessionId; ses->session_flags = le16_to_cpu(rsp->SessionFlags); rc = SMB2_sess_establish_session(sess_data); out: kfree(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->result = rc; sess_data->func = NULL; } static int SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) { int type; type = smb2_select_sectype(ses->server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, "Unable to select appropriate authentication method!"); return -EINVAL; } switch (type) { case Kerberos: sess_data->func = SMB2_auth_kerberos; break; case RawNTLMSSP: sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", type); return -EOPNOTSUPP; } return 0; } int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc = 0; struct TCP_Server_Info *server = ses->server; struct SMB2_sess_data *sess_data; cifs_dbg(FYI, "Session Setup\n"); if (!server) { WARN(1, "%s: server is NULL!\n", __func__); return -EIO; } sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; rc = 
SMB2_select_sec(ses, sess_data); if (rc) goto out; sess_data->xid = xid; sess_data->ses = ses; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; sess_data->previous_session = ses->Suid; /* * Initialize the session hash with the server one. */ memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash, SMB2_PREAUTH_HASH_SIZE); while (sess_data->func) sess_data->func(sess_data); if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) cifs_dbg(VFS, "signing requested but authenticated as guest\n"); rc = sess_data->result; out: kfree(sess_data); return rc; } int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) { struct smb_rqst rqst; struct smb2_logoff_req *req; /* response is also trivial struct */ int rc = 0; struct TCP_Server_Info *server; int flags = 0; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "disconnect session %p\n", ses); if (ses && (ses->server)) server = ses->server; else return -EIO; /* no need to send SMB logoff if uid already closed due to reconnect */ if (ses->need_reconnect) goto smb2_session_already_dead; rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len); if (rc) return rc; /* since no tcon, smb2_init can not do this, so do here */ req->sync_hdr.SessionId = ses->Suid; if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) flags |= CIFS_TRANSFORM_REQ; else if (server->sign) req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED; flags |= CIFS_NO_RESP; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); /* * No tcon so can't do * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); */ smb2_session_already_dead: return rc; } static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code) { 
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]); } #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */) /* These are similar values to what Windows uses */ static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon) { tcon->max_chunks = 256; tcon->max_bytes_chunk = 1048576; tcon->max_bytes_copy = 16777216; } int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *cp) { struct smb_rqst rqst; struct smb2_tree_connect_req *req; struct smb2_tree_connect_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov = { NULL, 0 }; int rc = 0; int resp_buftype; int unc_path_len; __le16 *unc_path = NULL; int flags = 0; unsigned int total_len; cifs_dbg(FYI, "TCON\n"); if (!(ses->server) || !tree) return -EIO; unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1; unc_path_len *= 2; if (unc_path_len < 2) { kfree(unc_path); return -EINVAL; } /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ tcon->tid = 0; atomic_set(&tcon->num_remote_opens, 0); rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req, &total_len); if (rc) { kfree(unc_path); return rc; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; iov[0].iov_base = (char *)req; /* 1 for pad */ iov[0].iov_len = total_len - 1; /* Testing shows that buffer offset must be at location of Buffer[0] */ req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) - 1 /* pad */); req->PathLength = cpu_to_le16(unc_path_len - 2); iov[1].iov_base = unc_path; iov[1].iov_len = unc_path_len; /* * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 * unless it is guest or anonymous user. 
See MS-SMB2 3.2.5.3.1 * (Samba servers don't always set the flag so also check if null user) */ if ((ses->server->dialect == SMB311_PROT_ID) && !smb3_encryption_required(tcon) && !(ses->session_flags & (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) && ((ses->user_name != NULL) || (ses->sectype == Kerberos))) req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 2; /* Need 64 for max size write so ask for more in case not there yet */ req->sync_hdr.CreditRequest = cpu_to_le16(64); rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); if (rc != 0) { if (tcon) { cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); tcon->need_reconnect = true; } goto tcon_error_exit; } switch (rsp->ShareType) { case SMB2_SHARE_TYPE_DISK: cifs_dbg(FYI, "connection to disk share\n"); break; case SMB2_SHARE_TYPE_PIPE: tcon->pipe = true; cifs_dbg(FYI, "connection to pipe share\n"); break; case SMB2_SHARE_TYPE_PRINT: tcon->print = true; cifs_dbg(FYI, "connection to printer\n"); break; default: cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); rc = -EOPNOTSUPP; goto tcon_error_exit; } tcon->share_flags = le32_to_cpu(rsp->ShareFlags); tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = rsp->sync_hdr.TreeId; strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); if (tcon->seal && !(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) cifs_dbg(VFS, "Encryption is requested but not supported\n"); init_copy_chunk_defaults(tcon); if 
(tcon->ses->server->ops->validate_negotiate) rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); tcon_exit: free_rsp_buf(resp_buftype, rsp); kfree(unc_path); return rc; tcon_error_exit: if (rsp && rsp->sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); } goto tcon_exit; } int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) { struct smb_rqst rqst; struct smb2_tree_disconnect_req *req; /* response is trivial */ int rc = 0; struct cifs_ses *ses = tcon->ses; int flags = 0; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "Tree Disconnect\n"); if (!ses || !(ses->server)) return -EIO; if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) return 0; rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; flags |= CIFS_NO_RESP; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); return rc; } static struct create_durable * create_durable_buf(void) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if (!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'Q'; return buf; } static struct create_durable * create_reconnect_durable_buf(struct cifs_fid *fid) { struct create_durable *buf; buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL); if 
(!buf) return NULL; buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_durable, Data)); buf->ccontext.DataLength = cpu_to_le32(16); buf->ccontext.NameOffset = cpu_to_le16(offsetof (struct create_durable, Name)); buf->ccontext.NameLength = cpu_to_le16(4); buf->Data.Fid.PersistentFileId = fid->persistent_fid; buf->Data.Fid.VolatileFileId = fid->volatile_fid; /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */ buf->Name[0] = 'D'; buf->Name[1] = 'H'; buf->Name[2] = 'n'; buf->Name[3] = 'C'; return buf; } __u8 smb2_parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, unsigned int *epoch, char *lease_key) { char *data_offset; struct create_context *cc; unsigned int next; unsigned int remaining; char *name; data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset); remaining = le32_to_cpu(rsp->CreateContextsLength); cc = (struct create_context *)data_offset; while (remaining >= sizeof(struct create_context)) { name = le16_to_cpu(cc->NameOffset) + (char *)cc; if (le16_to_cpu(cc->NameLength) == 4 && strncmp(name, "RqLs", 4) == 0) return server->ops->parse_lease_buf(cc, epoch, lease_key); next = le32_to_cpu(cc->Next); if (!next) break; remaining -= next; cc = (struct create_context *)((char *)cc + next); } return 0; } static int add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; if (!req->CreateContextsOffset) req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) + iov[num - 1].iov_len); le32_add_cpu(&req->CreateContextsLength, server->vals->create_lease_size); *num_iovec = num + 1; return 0; } static struct create_durable_v2 * 
/*
 * Allocate a v2 durable/persistent handle request context ("DH2Q") with
 * a fresh CreateGuid; the guid is also copied into the caller's fid so
 * a later reconnect can present it.  Caller frees; NULL on ENOMEM.
 */
create_durable_v2_buf(struct cifs_open_parms *oparms)
{
	struct cifs_fid *pfid = oparms->fid;
	struct create_durable_v2 *buf;

	buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_durable_v2, dcontext));
	buf->ccontext.DataLength =
		cpu_to_le32(sizeof(struct durable_context_v2));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct create_durable_v2, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);

	/*
	 * NB: Handle timeout defaults to 0, which allows server to choose
	 * (most servers default to 120 seconds) and most clients default to 0.
	 * This can be overridden at mount ("handletimeout=") if the user wants
	 * a different persistent (or resilient) handle timeout for all opens
	 * opens on a particular SMB3 mount.
	 */
	buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
	generate_random_uuid(buf->dcontext.CreateGuid);
	memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);

	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = '2';
	buf->Name[3] = 'Q';
	return buf;
}

/*
 * Allocate a v2 durable-handle reconnect context ("DH2C") carrying the
 * saved fid pair and CreateGuid from the original open.
 * Caller frees; NULL on ENOMEM.
 */
static struct create_durable_handle_reconnect_v2 *
create_reconnect_durable_v2_buf(struct cifs_fid *fid)
{
	struct create_durable_handle_reconnect_v2 *buf;

	buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
			GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
				     dcontext));
	buf->ccontext.DataLength =
		cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
			    Name));
	buf->ccontext.NameLength = cpu_to_le16(4);

	buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
	buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
	memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);

	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = '2';
	buf->Name[3] = 'C';
	return buf;
}

/*
 * Append a "DH2Q" context to the CREATE request iovec.
 *
 * NOTE(review): the CreateContextsOffset fallback here uses
 * iov[1].iov_len whereas add_lease_context uses iov[num - 1].iov_len —
 * equivalent only when num == 2; confirm intended for num > 2.
 */
static int
add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_durable_v2_buf(oparms);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable_v2);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
		     sizeof(struct create_durable_v2));
	*num_iovec = num + 1;
	return 0;
}

/*
 * Append a "DH2C" reconnect context to the CREATE request iovec and
 * clear oparms->reconnect (no relock needed after a v2 reconnect).
 */
static int
add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	/* indicate that we don't need to relock the file */
	oparms->reconnect = false;

	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
		sizeof(struct create_durable_handle_reconnect_v2));
	*num_iovec = num + 1;
	return 0;
}

/*
 * Dispatch to the right durable-handle context: v2 (persistent) when
 * use_persistent is set, otherwise the original "DHnQ"/"DHnC" forms,
 * choosing request vs reconnect based on oparms->reconnect.
 */
static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms, bool use_persistent)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	if (use_persistent) {
		if (oparms->reconnect)
			return add_durable_reconnect_v2_context(iov, num_iovec,
								oparms);
		else
			return add_durable_v2_context(iov, num_iovec, oparms);
	}
	if (oparms->reconnect) {
		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
		/* indicate that we don't need to relock the file */
		oparms->reconnect = false;
	} else
		iov[num].iov_base = create_durable_buf();
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
	*num_iovec = num + 1;
	return 0;
}

/* See MS-SMB2 2.2.13.2.7 */
/*
 * Allocate a timewarp ("TWrp") create context carrying the snapshot
 * timestamp to open a previous version.  Caller frees; NULL on ENOMEM.
 */
static struct crt_twarp_ctxt *
create_twarp_buf(__u64 timewarp)
{
	struct crt_twarp_ctxt *buf;

	buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct crt_twarp_ctxt, Timestamp));
	buf->ccontext.DataLength = cpu_to_le32(8);
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct crt_twarp_ctxt, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	/* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
	buf->Name[0] = 'T';
	buf->Name[1] = 'W';
	buf->Name[2] = 'r';
	buf->Name[3] = 'p';
	buf->Timestamp = cpu_to_le64(timewarp);
	return buf;
}

/* See MS-SMB2 2.2.13.2.7 */
/* Append a timewarp context to the CREATE request iovec. */
static int
add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_twarp_buf(timewarp);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
				sizeof(struct smb2_create_req) +
				iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
	*num_iovec = num + 1;
	return 0;
}

/*
 * Build "<treename-without-leading-backslashes>\<path>" as a freshly
 * allocated, NUL-terminated UTF-16 string whose byte size is rounded
 * up to a multiple of 8 (DFS name form).  On success *out_path/
 * *out_size/*out_len are set and the caller owns *out_path.
 */
static int
alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
			    const char *treename, const __le16 *path)
{
	int treename_len, path_len;
	struct nls_table *cp;
	const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};

	/*
	 * skip leading "\\"
	 */
	treename_len = strlen(treename);
	if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
		return -EINVAL;
	treename += 2;
	treename_len -= 2;

	path_len = UniStrnlen((wchar_t *)path, PATH_MAX);

	/*
	 * make room for one path separator between the treename and
	 * path
	 */
	*out_len = treename_len + 1 + path_len;

	/*
	 * final path needs to be null-terminated UTF16 with a
	 * size aligned to 8
	 */
	*out_size = roundup((*out_len+1)*2, 8);
	*out_path = kzalloc(*out_size, GFP_KERNEL);
	if (!*out_path)
		return -ENOMEM;

	cp = load_nls_default();
	cifs_strtoUTF16(*out_path, treename, treename_len, cp);
	UniStrcat(*out_path, sep);
	UniStrcat(*out_path, path);
	unload_nls(cp);

	return 0;
}

/*
 * Create a directory via an SMB3.1.1 POSIX-extensions CREATE (with a
 * posix create context carrying @mode), then immediately close the
 * returned handle.  Resources are released in reverse order through
 * the err_free_* labels; returns 0 or negative errno.
 */
int
smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
		   umode_t mode, struct cifs_tcon *tcon,
		   const char *full_path,
		   struct cifs_sb_info *cifs_sb)
{
	struct smb_rqst rqst;
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[3]; /* make sure at least one for each open context */
	struct kvec rsp_iov = {NULL, 0};
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	unsigned int n_iov = 2;
	__u32 file_attributes = 0;
	char *pc_buf = NULL;
	int flags = 0;
	unsigned int total_len;
	__le16 *utf16_path = NULL;

	cifs_dbg(FYI, "mkdir\n");

	/* resource #1: path allocation */
	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	if (!ses || !(ses->server)) {
		rc = -EIO;
		goto err_free_path;
	}

	/* resource #2: request */
	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
	if (rc)
		goto err_free_path;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(FILE_CREATE);
	req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);

	iov[0].iov_base = (char *)req;
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len = total_len - 1;

	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));

	/* [MS-SMB2] 2.2.13 NameOffset:
	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
	 * the SMB2 header, the file name includes a prefix that will
	 * be processed during DFS name normalization as specified in
	 * section 3.3.5.9. Otherwise, the file name is relative to
	 * the share that is identified by the TreeId in the SMB2
	 * header.
	 */
	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
		int name_len;

		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
						 &name_len,
						 tcon->treeName, utf16_path);
		if (rc)
			goto err_free_req;

		req->NameLength = cpu_to_le16(name_len * 2);
		uni_path_len = copy_size;
		/* free before overwriting resource */
		kfree(utf16_path);
		utf16_path = copy_path;
	} else {
		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path,
					       PATH_MAX)) + 2;
		/* MUST set path len (NameLength) to 0 opening root of share */
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		if (uni_path_len % 8 != 0) {
			/* re-pad path to an 8-byte multiple */
			copy_size = roundup(uni_path_len, 8);
			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path) {
				rc = -ENOMEM;
				goto err_free_req;
			}
			memcpy((char *)copy_path, (const char *)utf16_path,
			       uni_path_len);
			uni_path_len = copy_size;
			/* free before overwriting resource */
			kfree(utf16_path);
			utf16_path = copy_path;
		}
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = utf16_path;
	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;

	if (tcon->posix_extensions) {
		/* resource #3: posix buf */
		rc = add_posix_context(iov, &n_iov, mode);
		if (rc)
			goto err_free_req;
		pc_buf = iov[n_iov-1].iov_base;
	}

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = n_iov;

	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid,
				     CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
	/* resource #4: response buffer */
	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
					   CREATE_NOT_FILE,
					   FILE_WRITE_ATTRIBUTES, rc);
		goto err_free_rsp_buf;
	}

	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
				    ses->Suid, CREATE_NOT_FILE,
				    FILE_WRITE_ATTRIBUTES);

	/* mkdir only needs the side effect; drop the handle right away */
	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);

	/* Eventually save off posix specific response info and timestaps */

err_free_rsp_buf:
	free_rsp_buf(resp_buftype, rsp);
	kfree(pc_buf);
err_free_req:
	cifs_small_buf_release(req);
err_free_path:
	kfree(utf16_path);
	return rc;
}

/*
 * Build an SMB2 CREATE request into rqst->rq_iov: header+body in
 * iov[0], the UTF-16 path in iov[1], then optional create contexts
 * (lease, durable handle, posix, timewarp) in subsequent iovecs.
 * The caller sends the rqst and frees it with SMB2_open_free().
 */
int
SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
	       struct cifs_open_parms *oparms, __le16 *path)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_create_req *req;
	unsigned int n_iov = 2;
	__u32 file_attributes = 0;
	int copy_size;
	int uni_path_len;
	unsigned int total_len;
	struct kvec *iov = rqst->rq_iov;
	__le16 *copy_path;
	int rc;

	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	iov[0].iov_base = (char *)req;
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len = total_len - 1;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;
	if (oparms->create_options & CREATE_OPTION_SPECIAL)
		file_attributes |= ATTR_SYSTEM;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(oparms->disposition);
	req->CreateOptions =
		cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));

	/* [MS-SMB2] 2.2.13 NameOffset:
	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
	 * the SMB2 header, the file name includes a prefix that will
	 * be processed during DFS name
 * normalization as specified in
	 * section 3.3.5.9. Otherwise, the file name is relative to
	 * the share that is identified by the TreeId in the SMB2
	 * header.
	 */
	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
		int name_len;

		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
						 &name_len,
						 tcon->treeName, path);
		if (rc)
			return rc;
		req->NameLength = cpu_to_le16(name_len * 2);
		uni_path_len = copy_size;
		path = copy_path;
	} else {
		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
		/* MUST set path len (NameLength) to 0 opening root of share */
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		copy_size = uni_path_len;
		if (copy_size % 8 != 0)
			copy_size = roundup(copy_size, 8);
		copy_path = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_path)
			return -ENOMEM;
		memcpy((char *)copy_path, (const char *)path,
		       uni_path_len);
		uni_path_len = copy_size;
		path = copy_path;
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = path;

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	/* request a plain oplock, or a lease if the server supports it */
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
		  (oparms->create_options & CREATE_NOT_FILE))
		req->RequestedOplockLevel = *oplock; /* no srv lease support */
	else {
		rc = add_lease_context(server, iov, &n_iov,
				       oparms->fid->lease_key, oplock);
		if (rc)
			return rc;
	}

	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
		/* need to set Next field of lease context if we request it */
		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
			struct create_context *ccontext =
			    (struct create_context *)iov[n_iov-1].iov_base;
			ccontext->Next =
				cpu_to_le32(server->vals->create_lease_size);
		}

		rc = add_durable_context(iov, &n_iov, oparms,
					 tcon->use_persistent);
		if (rc)
			return rc;
	}

	if (tcon->posix_extensions) {
		/* chain the previous context to this one */
		if (n_iov > 2) {
			struct create_context *ccontext =
			    (struct create_context *)iov[n_iov-1].iov_base;
			ccontext->Next =
				cpu_to_le32(iov[n_iov-1].iov_len);
		}

		rc = add_posix_context(iov, &n_iov, oparms->mode);
		if (rc)
			return rc;
	}

	if (tcon->snapshot_time) {
		cifs_dbg(FYI, "adding snapshot context\n");
		/* chain the previous context to this one */
		if (n_iov > 2) {
			struct create_context *ccontext =
			    (struct create_context *)iov[n_iov-1].iov_base;
			ccontext->Next =
				cpu_to_le32(iov[n_iov-1].iov_len);
		}

		rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
		if (rc)
			return rc;
	}

	rqst->rq_nvec = n_iov;
	return 0;
}

/* rq_iov[0] is the request and is released by cifs_small_buf_release().
 * All other vectors are freed by kfree().
 */
void
SMB2_open_free(struct smb_rqst *rqst)
{
	int i;

	if (rqst && rqst->rq_iov) {
		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
		for (i = 1; i < rqst->rq_nvec; i++)
			if (rqst->rq_iov[i].iov_base != smb2_padding)
				kfree(rqst->rq_iov[i].iov_base);
	}
}

/*
 * Send an SMB2 CREATE (open) for @path.  On success fills
 * oparms->fid with the returned file ids, optionally copies basic
 * file info into @buf, and returns the granted oplock/lease state in
 * *oplock.  On error, if @err_iov is supplied the error response is
 * handed to the caller (who must free it) for further decoding.
 */
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
	  __u8 *oplock, struct smb2_file_all_info *buf,
	  struct kvec *err_iov, int *buftype)
{
	struct smb_rqst rqst;
	struct smb2_create_rsp *rsp = NULL;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon = oparms->tcon;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[SMB2_CREATE_IOV_SIZE];
	struct kvec rsp_iov = {NULL, 0};
	int resp_buftype = CIFS_NO_BUFFER;
	int rc = 0;
	int flags = 0;

	cifs_dbg(FYI, "create/open\n");
	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;

	rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
	if (rc)
		goto creat_exit;

	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
		oparms->create_options, oparms->desired_access);

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
			    &rsp_iov);
	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		if (err_iov && rsp) {
			/* transfer response ownership to the caller */
			*err_iov = rsp_iov;
			*buftype = resp_buftype;
			resp_buftype = CIFS_NO_BUFFER;
			rsp = NULL;
		}
		trace_smb3_open_err(xid, tcon->tid, ses->Suid,
				    oparms->create_options,
				    oparms->desired_access, rc);
		goto creat_exit;
	} else
		trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid,
				     ses->Suid, oparms->create_options,
				     oparms->desired_access);

	atomic_inc(&tcon->num_remote_opens);
	oparms->fid->persistent_fid = rsp->PersistentFileId;
	oparms->fid->volatile_fid = rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = smb2_parse_lease_state(server, rsp,
						 &oparms->fid->epoch,
						 oparms->fid->lease_key);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	SMB2_open_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 * Build an SMB2 IOCTL/FSCTL request into rqst->rq_iov: header+body in
 * iov[0] and, when @indatalen is nonzero, the input payload in iov[1].
 */
int
SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
		u64 persistent_fid, u64 volatile_fid, u32 opcode,
		bool is_fsctl, char *in_data, u32 indatalen,
		__u32 max_response_size)
{
	struct smb2_ioctl_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	req->CtlCode = cpu_to_le32(opcode);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/*
	 * If no input data, the size of ioctl struct in
	 * protocol spec still includes a 1 byte data buffer,
	 * but if input data passed to ioctl, we do not
	 * want to double count this, so we do not send
	 * the dummy one byte of data in iovec[0] if sending
	 * input data (in iovec[1]).
 */
	if (indatalen) {
		req->InputCount = cpu_to_le32(indatalen);
		/* do not set InputOffset if no input data */
		req->InputOffset =
		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
		rqst->rq_nvec = 2;
		iov[0].iov_len = total_len - 1;
		iov[1].iov_base = in_data;
		iov[1].iov_len = indatalen;
	} else {
		rqst->rq_nvec = 1;
		iov[0].iov_len = total_len;
	}

	req->OutputOffset = 0;
	req->OutputCount = 0; /* MBZ */

	/*
	 * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
	 * We Could increase default MaxOutputResponse, but that could require
	 * more credits. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as fits in one credit
	 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want
	 * to increase this limit up in the future.
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes. Currently that is the only case where we set max
	 * response size smaller.
	 */
	req->MaxOutputResponse = cpu_to_le32(max_response_size);

	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
	else
		req->Flags = 0;

	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
		req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;

	return 0;
}

/* Release only the request buffer; iov[1] input data belongs to the caller. */
void
SMB2_ioctl_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}

/*
 *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
 */
/*
 * Send an SMB2 IOCTL/FSCTL and, when the caller asks for it, copy the
 * (length/offset validated) output payload into a kmemdup'd buffer at
 * *out_data (caller frees) with its length in *plen.
 * A -EINVAL from the server is tolerated for the copychunk opcodes.
 */
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid, u32 opcode, bool is_fsctl,
	   char *in_data, u32 indatalen, u32 max_out_data_len,
	   char **out_data, u32 *plen /* returned data len */)
{
	struct smb_rqst rqst;
	struct smb2_ioctl_rsp *rsp = NULL;
	struct cifs_ses *ses;
	struct kvec iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec rsp_iov = {NULL, 0};
	int resp_buftype = CIFS_NO_BUFFER;
	int rc = 0;
	int flags = 0;

	cifs_dbg(FYI, "SMB2 IOCTL\n");

	if (out_data != NULL)
		*out_data = NULL;

	/* zero out returned data len, in case of error */
	if (plen)
		*plen = 0;

	if (tcon)
		ses = tcon->ses;
	else
		return -EIO;

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;

	rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
			     opcode, is_fsctl, in_data, indatalen,
			     max_out_data_len);
	if (rc)
		goto ioctl_exit;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
			    &rsp_iov);
	rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;

	if (rc != 0)
		trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
				ses->Suid, 0, opcode, rc);

	if ((rc != 0) && (rc != -EINVAL)) {
		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
		goto ioctl_exit;
	} else if (rc == -EINVAL) {
		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
		    (opcode != FSCTL_SRV_COPYCHUNK)) {
			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
			goto ioctl_exit;
		}
	}

	/* check if caller wants to look at return data or just return rc */
	if ((plen == NULL) || (out_data == NULL))
		goto ioctl_exit;

	*plen = le32_to_cpu(rsp->OutputCount);

	/* We check for obvious errors in the output buffer length and offset */
	if (*plen == 0)
		goto ioctl_exit; /* server returned no data */
	else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
		cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	/* output must fit between OutputOffset and end of the response */
	if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
		cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
			le32_to_cpu(rsp->OutputOffset));
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	*out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
			    *plen, GFP_KERNEL);
	if (*out_data == NULL) {
		rc = -ENOMEM;
		goto ioctl_exit;
	}

ioctl_exit:
	SMB2_ioctl_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *   Individual callers to ioctl worker function follow
 */
/*
 * Enable default compression on an open file via
 * FSCTL_SET_COMPRESSION; result is logged and returned.
 */
int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct compress_ioctl fsctl_input;
	char *ret_data = NULL;

	fsctl_input.CompressionState =
			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SET_COMPRESSION, true /* is_fsctl */,
			(char *)&fsctl_input /* data input */,
			2 /* in data len */, CIFSMaxBufSize /* max out data */,
			&ret_data /* out data */, NULL);

	cifs_dbg(FYI, "set compression rc %d\n", rc);

	return rc;
}

/* Build an SMB2 CLOSE request for the given fid pair into rqst->rq_iov. */
int
SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
		u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	return 0;
}

/* Release the request buffer built by SMB2_close_init(). */
void
SMB2_close_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}

/*
 * Send an SMB2 CLOSE for the given fid pair; @flags lets callers add
 * transport flags.  Decrements the tcon's remote-open count on success.
 */
int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, int flags)
{
	struct smb_rqst rqst;
	struct smb2_close_rsp *rsp = NULL;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buftype = CIFS_NO_BUFFER;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
	if (rc)
		goto close_exit;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
				     rc);
		goto close_exit;
	}

	atomic_dec(&tcon->num_remote_opens);

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	SMB2_close_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/* Convenience wrapper: SMB2_close_flags() with no extra flags. */
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
}

/*
 * Validate that a server-supplied (offset, buffer_length) pair
 * describes a region that fits inside the received SMB buffer @iov and
 * is at least @min_buf_size bytes.  Returns 0 or -EINVAL.
 */
int
smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
		  struct kvec *iov, unsigned int min_buf_size)
{
	unsigned int smb_len = iov->iov_len;
	char *end_of_smb = smb_len + (char *)iov->iov_base;
	char *begin_of_buf = offset + (char *)iov->iov_base;
	char *end_of_buf = begin_of_buf + buffer_length;

	if (buffer_length < min_buf_size) {
		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
			 buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
			 buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cifs_dbg(VFS, "illegal server response, bad offset to data\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
int
smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
			   struct kvec *iov, unsigned int minbufsize,
			   char *data)
{
	char *begin_of_buf = offset + (char *)iov->iov_base;
	int rc;

	if (!data)
		return -EINVAL;

	rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}

/*
 * Build an SMB2 QUERY_INFO request (file/security info classes) into
 * rqst->rq_iov, with optional input payload appended after the header.
 */
int
SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
		     u64 persistent_fid, u64 volatile_fid,
		     u8 info_class, u8 info_type, u32 additional_info,
		     size_t output_len, size_t input_len, void *input)
{
	struct smb2_query_info_req *req;
	struct kvec *iov = rqst->rq_iov;
	unsigned int total_len;
	int rc;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	req->InfoType = info_type;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	req->AdditionalInformation = cpu_to_le32(additional_info);

	req->OutputBufferLength = cpu_to_le32(output_len);
	if (input_len) {
		req->InputBufferLength = cpu_to_le32(input_len);
		/* total_len for smb query request never close to le16 max */
		req->InputBufferOffset = cpu_to_le16(total_len - 1);
		memcpy(req->Buffer, input, input_len);
	}

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1 + input_len;
	return 0;
}

/* Release the request buffer built by SMB2_query_info_init(). */
void
SMB2_query_info_free(struct smb_rqst *rqst)
{
	if (rqst && rqst->rq_iov)
		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}

/*
 * Worker for the SMB2_query_* helpers: sends QUERY_INFO and copies the
 * validated response payload into *data.  If *data is NULL and @dlen
 * is supplied, a buffer of the server-reported length is kmalloc'd
 * (freed again here on validation failure, otherwise owned by caller).
 */
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
	   u32 additional_info, size_t output_len, size_t min_len, void **data,
		u32 *dlen)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype = CIFS_NO_BUFFER;
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	bool allocated = false;

	cifs_dbg(FYI, "Query Info\n");

	if (!ses || !(ses->server))
		return -EIO;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	memset(&iov, 0, sizeof(iov));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
				  info_class, info_type, additional_info,
				  output_len, 0, NULL);
	if (rc)
		goto qinf_exit;

	trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
				    ses->Suid, info_class, (__u32)info_type);

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type, rc);
		goto qinf_exit;
	}

	trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type);

	if (dlen) {
		*dlen = le32_to_cpu(rsp->OutputBufferLength);
		if (!*data) {
			*data = kmalloc(*dlen, GFP_KERNEL);
			if (!*data) {
				cifs_dbg(VFS,
					"Error %d allocating memory for acl\n",
					rc);
				*dlen = 0;
				rc = -ENOMEM;
				goto qinf_exit;
			}
			allocated = true;
		}
	}

	rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
					le32_to_cpu(rsp->OutputBufferLength),
					&rsp_iov, min_len, *data);
	if (rc && allocated) {
		kfree(*data);
		*data = NULL;
		*dlen = 0;
	}

qinf_exit:
	SMB2_query_info_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/* Query FILE_ALL_INFORMATION for an open handle into @data. */
int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
	u64 persistent_fid, u64 volatile_fid,
	struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			  sizeof(struct smb2_file_all_info), (void **)&data,
			  NULL);
}

int
/*
 * Query the owner/group/DACL security descriptor of an open handle.
 * On success *data points to a kmalloc'd descriptor of *plen bytes
 * (allocated by query_info(); caller frees).
 */
SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		void **data, u32 *plen)
{
	__u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
	*plen = 0;

	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  0, SMB2_O_INFO_SECURITY, additional_info,
			  SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
}

/* Query the server-unique file id (FILE_INTERNAL_INFORMATION). */
int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info),
			  (void **)&uniqueid, NULL);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
	struct cifs_credits credits = { .value = 0, .instance = 0 };

	if (mid->mid_state == MID_RESPONSE_RECEIVED
	    || mid->mid_state == MID_RESPONSE_MALFORMED) {
		credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
		credits.instance = server->reconnect_instance;
	}

	DeleteMidQEntry(mid);
	/* return any credits the server granted with this echo */
	add_credits(server, &credits, CIFS_ECHO_OP);
}

/*
 * Delayed-work handler: collect every tcon on this server that needs
 * reconnect (plus the IPC tcon) under cifs_tcp_ses_lock, then redo the
 * negotiate/session/tree-connect sequence for each outside the lock,
 * reopening persistent handles on success.  Reschedules itself if any
 * tcon failed.  Serialized via server->reconnect_mutex.
 */
void smb2_reconnect_server(struct work_struct *work)
{
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, reconnect.work);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon, *tcon2;
	struct list_head tmp_list;
	int tcon_exist = false;
	int rc;
	int resched = false;

	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
	mutex_lock(&server->reconnect_mutex);

	INIT_LIST_HEAD(&tmp_list);
	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->need_reconnect || tcon->need_reopen_files) {
				tcon->tc_count++;
				list_add_tail(&tcon->rlist, &tmp_list);
				tcon_exist = true;
			}
		}
		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
			tcon_exist = true;
		}
	}
	/*
	 * Get the reference to server struct to be sure that the last call of
	 * cifs_put_tcon() in the loop below won't release the server pointer.
	 */
	if (tcon_exist)
		server->srv_count++;

	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
		if (!rc)
			cifs_reopen_persistent_handles(tcon);
		else
			resched = true;
		list_del_init(&tcon->rlist);
		cifs_put_tcon(tcon);
	}

	cifs_dbg(FYI, "Reconnecting tcons finished\n");
	if (resched)
		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
	mutex_unlock(&server->reconnect_mutex);

	/* now we can safely release srv struct */
	if (tcon_exist)
		cifs_put_tcp_session(server, 1);
}

/*
 * Send an asynchronous SMB2 ECHO keep-alive; the reply is handled by
 * smb2_echo_callback().  If the connection still needs to negotiate,
 * kicks the reconnect worker instead of echoing.
 */
int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov[1];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 1 };
	unsigned int total_len;

	cifs_dbg(FYI, "In echo request\n");

	if (server->tcpStatus == CifsNeedNegotiate) {
		/* No need to send echo on newly established connections */
		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
		return rc;
	}

	rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
	if (rc)
		return rc;

	req->sync_hdr.CreditRequest = cpu_to_le16(1);

	iov[0].iov_len = total_len;
	iov[0].iov_base = (char *)req;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
			     server, CIFS_ECHO_OP, NULL);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}

/*
 * Send an SMB2 FLUSH for the given fid pair (commit cached data on the
 * server); only the status code of the response is used.
 */
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb_rqst rqst;
	struct smb2_flush_req *req;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buftype;
	int rc = 0;
	int flags = 0;
	unsigned int total_len;

	cifs_dbg(FYI, "Flush\n");

	if (!ses || !(ses->server))
		return -EIO;

	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
		trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
				     rc);
	}

	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
/*
 * Build an SMB2 READ request, optionally registering an SMB-direct MR
 * for RDMA and/or setting compound-chain header fields based on
 * @request_type.  (Definition continues past this chunk.)
 */
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
	struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
	unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_plain_req *req = NULL;
	struct smb2_sync_hdr *shdr;
	struct TCP_Server_Info *server;

	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
				 total_len);
	if (rc)
		return rc;

	server = io_parms->tcon->ses->server;
	if (server == NULL)
		return -ECONNABORTED;

	shdr = &req->sync_hdr;
	shdr->ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	trace_smb3_read_enter(0 /* xid */,
			io_parms->persistent_fid,
			io_parms->tcon->tid, io_parms->tcon->ses->Suid,
			io_parms->offset, io_parms->length);
#ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If we want to do a RDMA write, fill in and append
	 * smbd_buffer_descriptor_v1 to the end of read request
	 */
	if (server->rdma && rdata && !server->sign &&
		rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {

		struct smbd_buffer_descriptor_v1 *v1;
		bool need_invalidate =
			io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;

		rdata->mr = smbd_register_mr(
				server->smbd_conn, rdata->pages,
				rdata->nr_pages, rdata->page_offset,
				rdata->tailsz, true, need_invalidate);
		if (!rdata->mr)
			return -ENOBUFS;

		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
		if (need_invalidate)
			req->Channel = SMB2_CHANNEL_RDMA_V1;
		req->ReadChannelInfoOffset =
			cpu_to_le16(offsetof(struct smb2_read_plain_req,
					     Buffer));
		req->ReadChannelInfoLength =
			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
		v1->token = cpu_to_le32(rdata->mr->mr->rkey);
		v1->length = cpu_to_le32(rdata->mr->mr->length);

		*total_len += sizeof(*v1) - 1;
	}
#endif
	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* next 8-byte aligned request */
			*total_len = DIV_ROUND_UP(*total_len, 8) * 8;
			shdr->NextCommand = cpu_to_le32(*total_len);
		} else /* END_OF_CHAIN */
			shdr->NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
*/ shdr->SessionId = 0xFFFFFFFF; shdr->TreeId = 0xFFFFFFFF; req->PersistentFileId = 0xFFFFFFFF; req->VolatileFileId = 0xFFFFFFFF; } } if (remaining_bytes > io_parms->length) req->RemainingBytes = cpu_to_le32(remaining_bytes); else req->RemainingBytes = 0; *buf = req; return rc; } static void smb2_readv_callback(struct mid_q_entry *mid) { struct cifs_readdata *rdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)rdata->iov[0].iov_base; struct cifs_credits credits = { .value = 0, .instance = 0 }; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 2, .rq_pages = rdata->pages, .rq_offset = rdata->page_offset, .rq_npages = rdata->nr_pages, .rq_pagesz = rdata->pagesz, .rq_tailsz = rdata->tailsz }; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n", __func__, mid->mid, mid->mid_state, rdata->result, rdata->bytes); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits.value = le16_to_cpu(shdr->CreditRequest); credits.instance = server->reconnect_instance; /* result already set, check signature */ if (server->sign && !mid->decrypted) { int rc; rc = smb2_verify_signature(&rqst, server); if (rc) cifs_dbg(VFS, "SMB signature verification returned error = %d\n", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: rdata->result = -EAGAIN; if (server->sign && rdata->got_bytes) /* reset bytes number since we can not check a sign */ rdata->got_bytes = 0; /* FIXME: should this be counted toward the initiating task? 
*/ task_io_account_read(rdata->got_bytes); cifs_stats_bytes_read(tcon, rdata->got_bytes); break; case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(shdr->CreditRequest); credits.instance = server->reconnect_instance; /* fall through */ default: rdata->result = -EIO; } #ifdef CONFIG_CIFS_SMB_DIRECT /* * If this rdata has a memmory registered, the MR can be freed * MR needs to be freed as soon as I/O finishes to prevent deadlock * because they have limited number and are used for future I/Os */ if (rdata->mr) { smbd_deregister_mr(rdata->mr); rdata->mr = NULL; } #endif if (rdata->result && rdata->result != -ENODATA) { cifs_stats_fail_inc(tcon, SMB2_READ_HE); trace_smb3_read_err(0 /* xid */, rdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, rdata->offset, rdata->bytes, rdata->result); } else trace_smb3_read_done(0 /* xid */, rdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, rdata->offset, rdata->got_bytes); queue_work(cifsiod_wq, &rdata->work); DeleteMidQEntry(mid); add_credits(server, &credits, 0); } /* smb2_async_readv - send an async read, and set up mid to handle result */ int smb2_async_readv(struct cifs_readdata *rdata) { int rc, flags = 0; char *buf; struct smb2_sync_hdr *shdr; struct cifs_io_parms io_parms; struct smb_rqst rqst = { .rq_iov = rdata->iov, .rq_nvec = 1 }; struct TCP_Server_Info *server; unsigned int total_len; cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", __func__, rdata->offset, rdata->bytes); io_parms.tcon = tlink_tcon(rdata->cfile->tlink); io_parms.offset = rdata->offset; io_parms.length = rdata->bytes; io_parms.persistent_fid = rdata->cfile->fid.persistent_fid; io_parms.volatile_fid = rdata->cfile->fid.volatile_fid; io_parms.pid = rdata->pid; server = io_parms.tcon->ses->server; rc = smb2_new_read_req( (void **) &buf, &total_len, &io_parms, rdata, 0, 0); if (rc) return rc; if (smb3_encryption_required(io_parms.tcon)) flags |= CIFS_TRANSFORM_REQ; rdata->iov[0].iov_base = buf; rdata->iov[0].iov_len = total_len; shdr = 
(struct smb2_sync_hdr *)buf; if (rdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); rc = adjust_credits(server, &rdata->credits, rdata->bytes); if (rc) goto async_readv_out; flags |= CIFS_HAS_CREDITS; } kref_get(&rdata->refcount); rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, cifs_readv_receive, smb2_readv_callback, smb3_handle_read_data, rdata, flags, &rdata->credits); if (rc) { kref_put(&rdata->refcount, cifs_readdata_release); cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, io_parms.tcon->tid, io_parms.tcon->ses->Suid, io_parms.offset, io_parms.length, rc); } async_readv_out: cifs_small_buf_release(buf); return rc; } int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, char **buf, int *buf_type) { struct smb_rqst rqst; int resp_buftype, rc = -EACCES; struct smb2_read_plain_req *req = NULL; struct smb2_read_rsp *rsp = NULL; struct kvec iov[1]; struct kvec rsp_iov; unsigned int total_len; int flags = CIFS_LOG_ERROR; struct cifs_ses *ses = io_parms->tcon->ses; *nbytes = 0; rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); if (rc) return rc; if (smb3_encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; if (rc) { if (rc != -ENODATA) { cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); cifs_dbg(VFS, "Send error in read = %d\n", rc); trace_smb3_read_err(xid, req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length, rc); } else trace_smb3_read_done(xid, req->PersistentFileId, 
io_parms->tcon->tid, ses->Suid, io_parms->offset, 0); free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc == -ENODATA ? 0 : rc; } else trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid, ses->Suid, io_parms->offset, io_parms->length); *nbytes = le32_to_cpu(rsp->DataLength); if ((*nbytes > CIFS_MAX_MSGSIZE) || (*nbytes > io_parms->length)) { cifs_dbg(FYI, "bad length %d for count %d\n", *nbytes, io_parms->length); rc = -EIO; *nbytes = 0; } if (*buf) { memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes); free_rsp_buf(resp_buftype, rsp_iov.iov_base); } else if (resp_buftype != CIFS_NO_BUFFER) { *buf = rsp_iov.iov_base; if (resp_buftype == CIFS_SMALL_BUFFER) *buf_type = CIFS_SMALL_BUFFER; else if (resp_buftype == CIFS_LARGE_BUFFER) *buf_type = CIFS_LARGE_BUFFER; } return rc; } /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. */ static void smb2_writev_callback(struct mid_q_entry *mid) { struct cifs_writedata *wdata = mid->callback_data; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; unsigned int written; struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf; struct cifs_credits credits = { .value = 0, .instance = 0 }; switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest); credits.instance = server->reconnect_instance; wdata->result = smb2_check_receive(mid, server, 0); if (wdata->result != 0) break; written = le32_to_cpu(rsp->DataLength); /* * Mask off high 16 bits when bytes written as returned * by the server is greater than bytes requested by the * client. OS/2 servers are known to set incorrect * CountHigh values. 
*/ if (written > wdata->bytes) written &= 0xFFFF; if (written < wdata->bytes) wdata->result = -ENOSPC; else wdata->bytes = written; break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: wdata->result = -EAGAIN; break; case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest); credits.instance = server->reconnect_instance; /* fall through */ default: wdata->result = -EIO; break; } #ifdef CONFIG_CIFS_SMB_DIRECT /* * If this wdata has a memory registered, the MR can be freed * The number of MRs available is limited, it's important to recover * used MR as soon as I/O is finished. Hold MR longer in the later * I/O process can possibly result in I/O deadlock due to lack of MR * to send request on I/O retry */ if (wdata->mr) { smbd_deregister_mr(wdata->mr); wdata->mr = NULL; } #endif if (wdata->result) { cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); trace_smb3_write_err(0 /* no xid */, wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes, wdata->result); } else trace_smb3_write_done(0 /* no xid */, wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes); queue_work(cifsiod_wq, &wdata->work); DeleteMidQEntry(mid); add_credits(server, &credits, 0); } /* smb2_async_writev - send an async write, and set up mid to handle result */ int smb2_async_writev(struct cifs_writedata *wdata, void (*release)(struct kref *kref)) { int rc = -EACCES, flags = 0; struct smb2_write_req *req = NULL; struct smb2_sync_hdr *shdr; struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct kvec iov[1]; struct smb_rqst rqst = { }; unsigned int total_len; rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; shdr = (struct smb2_sync_hdr *)req; shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); req->PersistentFileId = 
wdata->cfile->fid.persistent_fid; req->VolatileFileId = wdata->cfile->fid.volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Offset = cpu_to_le64(wdata->offset); req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer)); req->RemainingBytes = 0; trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes); #ifdef CONFIG_CIFS_SMB_DIRECT /* * If we want to do a server RDMA read, fill in and append * smbd_buffer_descriptor_v1 to the end of write request */ if (server->rdma && !server->sign && wdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { struct smbd_buffer_descriptor_v1 *v1; bool need_invalidate = server->dialect == SMB30_PROT_ID; wdata->mr = smbd_register_mr( server->smbd_conn, wdata->pages, wdata->nr_pages, wdata->page_offset, wdata->tailsz, false, need_invalidate); if (!wdata->mr) { rc = -ENOBUFS; goto async_writev_out; } req->Length = 0; req->DataOffset = 0; if (wdata->nr_pages > 1) req->RemainingBytes = cpu_to_le32( (wdata->nr_pages - 1) * wdata->pagesz - wdata->page_offset + wdata->tailsz ); else req->RemainingBytes = cpu_to_le32(wdata->tailsz); req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; if (need_invalidate) req->Channel = SMB2_CHANNEL_RDMA_V1; req->WriteChannelInfoOffset = cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); req->WriteChannelInfoLength = cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; v1->offset = cpu_to_le64(wdata->mr->mr->iova); v1->token = cpu_to_le32(wdata->mr->mr->rkey); v1->length = cpu_to_le32(wdata->mr->mr->length); } #endif iov[0].iov_len = total_len - 1; iov[0].iov_base = (char *)req; rqst.rq_iov = iov; rqst.rq_nvec = 1; rqst.rq_pages = wdata->pages; rqst.rq_offset = wdata->page_offset; rqst.rq_npages = wdata->nr_pages; rqst.rq_pagesz = wdata->pagesz; rqst.rq_tailsz = wdata->tailsz; #ifdef CONFIG_CIFS_SMB_DIRECT if 
(wdata->mr) { iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1); rqst.rq_npages = 0; } #endif cifs_dbg(FYI, "async write at %llu %u bytes\n", wdata->offset, wdata->bytes); #ifdef CONFIG_CIFS_SMB_DIRECT /* For RDMA read, I/O size is in RemainingBytes not in Length */ if (!wdata->mr) req->Length = cpu_to_le32(wdata->bytes); #else req->Length = cpu_to_le32(wdata->bytes); #endif if (wdata->credits.value > 0) { shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE)); shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1); rc = adjust_credits(server, &wdata->credits, wdata->bytes); if (rc) goto async_writev_out; flags |= CIFS_HAS_CREDITS; } kref_get(&wdata->refcount); rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, wdata, flags, &wdata->credits); if (rc) { trace_smb3_write_err(0 /* no xid */, req->PersistentFileId, tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes, rc); kref_put(&wdata->refcount, release); cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); } async_writev_out: cifs_small_buf_release(req); return rc; } /* * SMB2_write function gets iov pointer to kvec array with n_vec as a length. * The length field from io_parms must be at least 1 and indicates a number of * elements with data to write that begins with position 1 in iov array. All * data length is specified by count. 
*/ int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, struct kvec *iov, int n_vec) { struct smb_rqst rqst; int rc = 0; struct smb2_write_req *req = NULL; struct smb2_write_rsp *rsp = NULL; int resp_buftype; struct kvec rsp_iov; int flags = 0; unsigned int total_len; *nbytes = 0; if (n_vec < 1) return rc; rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req, &total_len); if (rc) return rc; if (io_parms->tcon->ses->server == NULL) return -ECONNABORTED; if (smb3_encryption_required(io_parms->tcon)) flags |= CIFS_TRANSFORM_REQ; req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); req->PersistentFileId = io_parms->persistent_fid; req->VolatileFileId = io_parms->volatile_fid; req->WriteChannelInfoOffset = 0; req->WriteChannelInfoLength = 0; req->Channel = 0; req->Length = cpu_to_le32(io_parms->length); req->Offset = cpu_to_le64(io_parms->offset); req->DataOffset = cpu_to_le16( offsetof(struct smb2_write_req, Buffer)); req->RemainingBytes = 0; trace_smb3_write_enter(xid, io_parms->persistent_fid, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length); iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = n_vec + 1; rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; if (rc) { trace_smb3_write_err(xid, req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, io_parms->length, rc); cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); cifs_dbg(VFS, "Send error in write = %d\n", rc); } else { *nbytes = le32_to_cpu(rsp->DataLength); trace_smb3_write_done(xid, req->PersistentFileId, io_parms->tcon->tid, io_parms->tcon->ses->Suid, io_parms->offset, *nbytes); } free_rsp_buf(resp_buftype, rsp); return rc; } static unsigned int num_entries(char 
*bufstart, char *end_of_buf, char **lastentry, size_t size) { int len; unsigned int entrycount = 0; unsigned int next_offset = 0; char *entryptr; FILE_DIRECTORY_INFO *dir_info; if (bufstart == NULL) return 0; entryptr = bufstart; while (1) { if (entryptr + next_offset < entryptr || entryptr + next_offset > end_of_buf || entryptr + next_offset + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } entryptr = entryptr + next_offset; dir_info = (FILE_DIRECTORY_INFO *)entryptr; len = le32_to_cpu(dir_info->FileNameLength); if (entryptr + len < entryptr || entryptr + len > end_of_buf || entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } *lastentry = entryptr; entrycount++; next_offset = le32_to_cpu(dir_info->NextEntryOffset); if (!next_offset) break; } return entrycount; } /* * Readdir/FindFirst */ int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int index, struct cifs_search_info *srch_inf) { struct smb_rqst rqst; struct smb2_query_directory_req *req; struct smb2_query_directory_rsp *rsp = NULL; struct kvec iov[2]; struct kvec rsp_iov; int rc = 0; int len; int resp_buftype = CIFS_NO_BUFFER; unsigned char *bufptr; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; __le16 asteriks = cpu_to_le16('*'); char *end_of_smb; unsigned int output_size = CIFSMaxBufSize; size_t info_buf_size; int flags = 0; unsigned int total_len; if (ses && (ses->server)) server = ses->server; else return -EIO; rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; switch (srch_inf->info_level) { case SMB_FIND_FILE_DIRECTORY_INFO: req->FileInformationClass = FILE_DIRECTORY_INFORMATION; info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1; break; case SMB_FIND_FILE_ID_FULL_DIR_INFO: 
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1; break; default: cifs_dbg(VFS, "info level %u isn't supported\n", srch_inf->info_level); rc = -EINVAL; goto qdir_exit; } req->FileIndex = cpu_to_le32(index); req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; len = 0x2; bufptr = req->Buffer; memcpy(bufptr, &asteriks, len); req->FileNameOffset = cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1); req->FileNameLength = cpu_to_le16(len); /* * BB could be 30 bytes or so longer if we used SMB2 specific * buffer lengths, but this is safe and close enough. */ output_size = min_t(unsigned int, output_size, server->maxBuf); output_size = min_t(unsigned int, output_size, 2 << 15); req->OutputBufferLength = cpu_to_le32(output_size); iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; iov[1].iov_base = (char *)(req->Buffer); iov[1].iov_len = len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 2; trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, output_size); rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; if (rc) { if (rc == -ENODATA && rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0); srch_inf->endOfSearch = true; rc = 0; } else { trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0, rc); cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); } goto qdir_exit; } rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, info_buf_size); if (rc) { trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, 0, rc); goto qdir_exit; } srch_inf->unicode = true; if (srch_inf->ntwrk_buf_start) { if 
(srch_inf->smallBuf) cifs_small_buf_release(srch_inf->ntwrk_buf_start); else cifs_buf_release(srch_inf->ntwrk_buf_start); } srch_inf->ntwrk_buf_start = (char *)rsp; srch_inf->srch_entries_start = srch_inf->last_entry = (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset); end_of_smb = rsp_iov.iov_len + (char *)rsp; srch_inf->entries_in_buffer = num_entries(srch_inf->srch_entries_start, end_of_smb, &srch_inf->last_entry, info_buf_size); srch_inf->index_of_last_entry += srch_inf->entries_in_buffer; cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n", srch_inf->entries_in_buffer, srch_inf->index_of_last_entry, srch_inf->srch_entries_start, srch_inf->last_entry); if (resp_buftype == CIFS_LARGE_BUFFER) srch_inf->smallBuf = false; else if (resp_buftype == CIFS_SMALL_BUFFER) srch_inf->smallBuf = true; else cifs_dbg(VFS, "illegal search buffer type\n"); trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid, tcon->ses->Suid, index, srch_inf->entries_in_buffer); return rc; qdir_exit: free_rsp_buf(resp_buftype, rsp); return rc; } int SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class, u8 info_type, u32 additional_info, void **data, unsigned int *size) { struct smb2_set_info_req *req; struct kvec *iov = rqst->rq_iov; unsigned int i, total_len; int rc; rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len); if (rc) return rc; req->sync_hdr.ProcessId = cpu_to_le32(pid); req->InfoType = info_type; req->FileInfoClass = info_class; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; req->AdditionalInformation = cpu_to_le32(additional_info); req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req) - 1); req->BufferLength = cpu_to_le32(*size); memcpy(req->Buffer, *data, *size); total_len += *size; iov[0].iov_base = (char *)req; /* 1 for Buffer */ iov[0].iov_len = total_len - 1; for (i = 1; i < rqst->rq_nvec; i++) { 
le32_add_cpu(&req->BufferLength, size[i]); iov[i].iov_base = (char *)data[i]; iov[i].iov_len = size[i]; } return 0; } void SMB2_set_info_free(struct smb_rqst *rqst) { if (rqst && rqst->rq_iov) cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */ } static int send_set_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class, u8 info_type, u32 additional_info, unsigned int num, void **data, unsigned int *size) { struct smb_rqst rqst; struct smb2_set_info_rsp *rsp = NULL; struct kvec *iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; int flags = 0; if (!ses || !(ses->server)) return -EIO; if (!num) return -EINVAL; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL); if (!iov) return -ENOMEM; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = num; rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid, info_class, info_type, additional_info, data, size); if (rc) { kfree(iov); return rc; } rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); SMB2_set_info_free(&rqst); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) { cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); trace_smb3_set_info_err(xid, persistent_fid, tcon->tid, ses->Suid, info_class, (__u32)info_type, rc); } free_rsp_buf(resp_buftype, rsp); kfree(iov); return rc; } int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 pid, __le64 *eof) { struct smb2_file_eof_info info; void *data; unsigned int size; info.EndOfFile = *eof; data = &info; size = sizeof(struct smb2_file_eof_info); return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE, 0, 1, &data, &size); } int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, 
struct cifs_ntsd *pnntsd, int pacllen, int aclflag) { return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, 1, (void **)&pnntsd, &pacllen); } int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct smb2_file_full_ea_info *buf, int len) { return send_set_info(xid, tcon, persistent_fid, volatile_fid, current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, 1, (void **)&buf, &len); } int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, __u8 oplock_level) { struct smb_rqst rqst; int rc; struct smb2_oplock_break *req = NULL; struct cifs_ses *ses = tcon->ses; int flags = CIFS_OBREAK_OP; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; cifs_dbg(FYI, "SMB2_oplock_break\n"); rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->VolatileFid = volatile_fid; req->PersistentFid = persistent_fid; req->OplockLevel = oplock_level; req->sync_hdr.CreditRequest = cpu_to_le16(1); flags |= CIFS_NO_RESP; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc); } return rc; } void smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_bfree = kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } static void 
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data, struct kstatfs *kst) { kst->f_bsize = le32_to_cpu(response_data->BlockSize); kst->f_blocks = le64_to_cpu(response_data->TotalBlocks); kst->f_bfree = le64_to_cpu(response_data->BlocksAvail); if (response_data->UserBlocksAvail == cpu_to_le64(-1)) kst->f_bavail = kst->f_bfree; else kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail); if (response_data->TotalFileNodes != cpu_to_le64(-1)) kst->f_files = le64_to_cpu(response_data->TotalFileNodes); if (response_data->FreeFileNodes != cpu_to_le64(-1)) kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes); return; } static int build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level, int outbuf_len, u64 persistent_fid, u64 volatile_fid) { int rc; struct smb2_query_info_req *req; unsigned int total_len; cifs_dbg(FYI, "Query FSInfo level %d\n", level); if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) return -EIO; rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req, &total_len); if (rc) return rc; req->InfoType = SMB2_O_INFO_FILESYSTEM; req->FileInfoClass = level; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; /* 1 for pad */ req->InputBufferOffset = cpu_to_le16(sizeof(struct smb2_query_info_req) - 1); req->OutputBufferLength = cpu_to_le32( outbuf_len + sizeof(struct smb2_query_info_rsp) - 1); iov->iov_base = (char *)req; iov->iov_len = total_len; return 0; } int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; FILE_SYSTEM_POSIX_INFO *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION, sizeof(FILE_SYSTEM_POSIX_INFO), persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= 
CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto posix_qfsinf_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (FILE_SYSTEM_POSIX_INFO *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(FILE_SYSTEM_POSIX_INFO)); if (!rc) copy_posix_fs_info_to_kstatfs(info, fsdata); posix_qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype; struct cifs_ses *ses = tcon->ses; struct smb2_fs_full_size_info *info = NULL; int flags = 0; rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION, sizeof(struct smb2_fs_full_size_info), persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsinf_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; info = (struct smb2_fs_full_size_info *)( le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp); rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset), le32_to_cpu(rsp->OutputBufferLength), &rsp_iov, sizeof(struct smb2_fs_full_size_info)); if (!rc) smb2_copy_fs_info_to_kstatfs(info, fsdata); qfsinf_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int SMB2_QFS_attr(const unsigned 
int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, int level) { struct smb_rqst rqst; struct smb2_query_info_rsp *rsp = NULL; struct kvec iov; struct kvec rsp_iov; int rc = 0; int resp_buftype, max_len, min_len; struct cifs_ses *ses = tcon->ses; unsigned int rsp_len, offset; int flags = 0; if (level == FS_DEVICE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); min_len = sizeof(FILE_SYSTEM_DEVICE_INFO); } else if (level == FS_ATTRIBUTE_INFORMATION) { max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO); min_len = MIN_FS_ATTR_INFO_SIZE; } else if (level == FS_SECTOR_SIZE_INFORMATION) { max_len = sizeof(struct smb3_fs_ss_info); min_len = sizeof(struct smb3_fs_ss_info); } else if (level == FS_VOLUME_INFORMATION) { max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN; min_len = sizeof(struct smb3_fs_vol_info); } else { cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level); return -EINVAL; } rc = build_qfs_info_req(&iov, tcon, level, max_len, persistent_fid, volatile_fid); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = &iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(iov.iov_base); if (rc) { cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); goto qfsattr_exit; } rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; rsp_len = le32_to_cpu(rsp->OutputBufferLength); offset = le16_to_cpu(rsp->OutputBufferOffset); rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len); if (rc) goto qfsattr_exit; if (level == FS_ATTRIBUTE_INFORMATION) memcpy(&tcon->fsAttrInfo, offset + (char *)rsp, min_t(unsigned int, rsp_len, max_len)); else if (level == FS_DEVICE_INFORMATION) memcpy(&tcon->fsDevInfo, offset + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO)); else if (level == FS_SECTOR_SIZE_INFORMATION) { struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *) (offset + (char *)rsp); tcon->ss_flags = 
le32_to_cpu(ss_info->Flags); tcon->perf_sector_size = le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf); } else if (level == FS_VOLUME_INFORMATION) { struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *) (offset + (char *)rsp); tcon->vol_serial_number = vol_info->VolumeSerialNumber; tcon->vol_create_time = vol_info->VolumeCreationTime; } qfsattr_exit: free_rsp_buf(resp_buftype, rsp_iov.iov_base); return rc; } int smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u32 num_lock, struct smb2_lock_element *buf) { struct smb_rqst rqst; int rc = 0; struct smb2_lock_req *req = NULL; struct kvec iov[2]; struct kvec rsp_iov; int resp_buf_type; unsigned int count; int flags = CIFS_NO_RESP; unsigned int total_len; cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->sync_hdr.ProcessId = cpu_to_le32(pid); req->LockCount = cpu_to_le16(num_lock); req->PersistentFileId = persist_fid; req->VolatileFileId = volatile_fid; count = num_lock * sizeof(struct smb2_lock_element); iov[0].iov_base = (char *)req; iov[0].iov_len = total_len - sizeof(struct smb2_lock_element); iov[1].iov_base = (char *)buf; iov[1].iov_len = count; cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 2; rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); if (rc) { cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); trace_smb3_lock_err(xid, persist_fid, tcon->tid, tcon->ses->Suid, rc); } return rc; } int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon, const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid, const __u64 length, const __u64 offset, const __u32 lock_flags, 
const bool wait) { struct smb2_lock_element lock; lock.Offset = cpu_to_le64(offset); lock.Length = cpu_to_le64(length); lock.Flags = cpu_to_le32(lock_flags); if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK) lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY); return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock); } int SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, __u8 *lease_key, const __le32 lease_state) { struct smb_rqst rqst; int rc; struct smb2_lease_ack *req = NULL; struct cifs_ses *ses = tcon->ses; int flags = CIFS_OBREAK_OP; unsigned int total_len; struct kvec iov[1]; struct kvec rsp_iov; int resp_buf_type; __u64 *please_key_high; __u64 *please_key_low; cifs_dbg(FYI, "SMB2_lease_break\n"); rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req, &total_len); if (rc) return rc; if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; req->sync_hdr.CreditRequest = cpu_to_le16(1); req->StructureSize = cpu_to_le16(36); total_len += 12; memcpy(req->LeaseKey, lease_key, 16); req->LeaseState = lease_state; flags |= CIFS_NO_RESP; iov[0].iov_base = (char *)req; iov[0].iov_len = total_len; memset(&rqst, 0, sizeof(struct smb_rqst)); rqst.rq_iov = iov; rqst.rq_nvec = 1; rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); cifs_small_buf_release(req); please_key_low = (__u64 *)lease_key; please_key_high = (__u64 *)(lease_key+8); if (rc) { cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, ses->Suid, *please_key_low, *please_key_high, rc); cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc); } else trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid, ses->Suid, *please_key_low, *please_key_high); return rc; }
./CrossVul/dataset_final_sorted/CWE-416/c/bad_1057_0
crossvul-cpp_data_bad_2030_0
/* * Routines having to do with the 'struct sk_buff' memory handlers. * * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> * Florian La Roche <rzsfl@rz.uni-sb.de> * * Fixes: * Alan Cox : Fixed the worst of the load * balancer bugs. * Dave Platt : Interrupt stacking fix. * Richard Kooijman : Timestamp fixes. * Alan Cox : Changed buffer format. * Alan Cox : destructor hook for AF_UNIX etc. * Linus Torvalds : Better skb_clone. * Alan Cox : Added skb_copy. * Alan Cox : Added all the changed routines Linus * only put in the headers * Ray VanTassle : Fixed --skb->lock in free * Alan Cox : skb_copy copy arp field * Andi Kleen : slabified it. * Robert Olsson : Removed skb_head_pool * * NOTE: * The __skb_ routines should be called with interrupts * disabled, or you better be *real* sure that the operation is atomic * with respect to whatever list is being frobbed (e.g. via lock_sock() * or via disabling bottom half handlers, etc). * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ /* * The functions in this file will not compile correctly with gcc 2.4.x */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/kmemcheck.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/netdevice.h> #ifdef CONFIG_NET_CLS_ACT #include <net/pkt_sched.h> #endif #include <linux/string.h> #include <linux/skbuff.h> #include <linux/splice.h> #include <linux/cache.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/scatterlist.h> #include <linux/errqueue.h> #include <linux/prefetch.h> #include <net/protocol.h> #include <net/dst.h> #include <net/sock.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> #include <asm/uaccess.h> #include <trace/events/skb.h> #include <linux/highmem.h> struct kmem_cache *skbuff_head_cache __read_mostly; static struct kmem_cache *skbuff_fclone_cache __read_mostly; /** * skb_panic - private function for out-of-line support * @skb: buffer * @sz: size * @addr: address * @msg: skb_over_panic or skb_under_panic * * Out-of-line support for skb_put() and skb_push(). * Called via the wrapper skb_over_panic() or skb_under_panic(). * Keep out of line to prevent kernel bloat. * __builtin_return_address is not used because it is not always reliable. */ static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, const char msg[]) { pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", msg, addr, skb->len, sz, skb->head, skb->data, (unsigned long)skb->tail, (unsigned long)skb->end, skb->dev ? 
skb->dev->name : "<NULL>"); BUG(); } static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) { skb_panic(skb, sz, addr, __func__); } static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) { skb_panic(skb, sz, addr, __func__); } /* * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells * the caller if emergency pfmemalloc reserves are being used. If it is and * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves * may be used. Otherwise, the packet data may be discarded until enough * memory is free */ #define kmalloc_reserve(size, gfp, node, pfmemalloc) \ __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip, bool *pfmemalloc) { void *obj; bool ret_pfmemalloc = false; /* * Try a regular allocation, when that fails and we're not entitled * to the reserves, fail. */ obj = kmalloc_node_track_caller(size, flags | __GFP_NOMEMALLOC | __GFP_NOWARN, node); if (obj || !(gfp_pfmemalloc_allowed(flags))) goto out; /* Try again but now we are using pfmemalloc reserves */ ret_pfmemalloc = true; obj = kmalloc_node_track_caller(size, flags, node); out: if (pfmemalloc) *pfmemalloc = ret_pfmemalloc; return obj; } /* Allocate a new skbuff. We do this ourselves so we can fill in a few * 'private' fields and also do memory statistics to find all the * [BEEP] leaks. * */ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) { struct sk_buff *skb; /* Get the HEAD */ skb = kmem_cache_alloc_node(skbuff_head_cache, gfp_mask & ~__GFP_DMA, node); if (!skb) goto out; /* * Only clear those fields we need to clear, not those that we will * actually initialise below. Hence, don't put any more fields after * the tail pointer in struct sk_buff! 
*/ memset(skb, 0, offsetof(struct sk_buff, tail)); skb->head = NULL; skb->truesize = sizeof(struct sk_buff); atomic_set(&skb->users, 1); skb->mac_header = (typeof(skb->mac_header))~0U; out: return skb; } /** * __alloc_skb - allocate a network buffer * @size: size to allocate * @gfp_mask: allocation mask * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache * instead of head cache and allocate a cloned (child) skb. * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for * allocations in case the data is required for writeback * @node: numa node to allocate memory on * * Allocate a new &sk_buff. The returned buffer has no headroom and a * tail room of at least size bytes. The object has a reference count * of one. The return is the buffer. On a failure the return is %NULL. * * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, int flags, int node) { struct kmem_cache *cache; struct skb_shared_info *shinfo; struct sk_buff *skb; u8 *data; bool pfmemalloc; cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) gfp_mask |= __GFP_MEMALLOC; /* Get the HEAD */ skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); if (!skb) goto out; prefetchw(skb); /* We do our best to align skb_shared_info on a separate cache * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives * aligned memory blocks, unless SLUB/SLAB debug is enabled. * Both skb->head and skb_shared_info are cache line aligned. */ size = SKB_DATA_ALIGN(size); size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); if (!data) goto nodata; /* kmalloc(size) might give us more room than requested. * Put skb_shared_info exactly at the end of allocated zone, * to allow max possible filling before reallocation. 
*/ size = SKB_WITH_OVERHEAD(ksize(data)); prefetchw(data + size); /* * Only clear those fields we need to clear, not those that we will * actually initialise below. Hence, don't put any more fields after * the tail pointer in struct sk_buff! */ memset(skb, 0, offsetof(struct sk_buff, tail)); /* Account for allocated memory : skb + skb->head */ skb->truesize = SKB_TRUESIZE(size); skb->pfmemalloc = pfmemalloc; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; skb_reset_tail_pointer(skb); skb->end = skb->tail + size; skb->mac_header = (typeof(skb->mac_header))~0U; skb->transport_header = (typeof(skb->transport_header))~0U; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); atomic_set(&shinfo->dataref, 1); kmemcheck_annotate_variable(shinfo->destructor_arg); if (flags & SKB_ALLOC_FCLONE) { struct sk_buff *child = skb + 1; atomic_t *fclone_ref = (atomic_t *) (child + 1); kmemcheck_annotate_bitfield(child, flags1); kmemcheck_annotate_bitfield(child, flags2); skb->fclone = SKB_FCLONE_ORIG; atomic_set(fclone_ref, 1); child->fclone = SKB_FCLONE_UNAVAILABLE; child->pfmemalloc = pfmemalloc; } out: return skb; nodata: kmem_cache_free(cache, skb); skb = NULL; goto out; } EXPORT_SYMBOL(__alloc_skb); /** * build_skb - build a network buffer * @data: data buffer provided by caller * @frag_size: size of fragment, or 0 if head was kmalloced * * Allocate a new &sk_buff. Caller provides space holding head and * skb_shared_info. @data must have been allocated by kmalloc() only if * @frag_size is 0, otherwise data should come from the page allocator. * The return is the new skb buffer. * On a failure the return is %NULL, and @data is not freed. 
* Notes : * Before IO, driver allocates only data buffer where NIC put incoming frame * Driver should add room at head (NET_SKB_PAD) and * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) * After IO, driver calls build_skb(), to allocate sk_buff and populate it * before giving packet to stack. * RX rings only contains data buffers, not full skbs. */ struct sk_buff *build_skb(void *data, unsigned int frag_size) { struct skb_shared_info *shinfo; struct sk_buff *skb; unsigned int size = frag_size ? : ksize(data); skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); if (!skb) return NULL; size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); memset(skb, 0, offsetof(struct sk_buff, tail)); skb->truesize = SKB_TRUESIZE(size); skb->head_frag = frag_size != 0; atomic_set(&skb->users, 1); skb->head = data; skb->data = data; skb_reset_tail_pointer(skb); skb->end = skb->tail + size; skb->mac_header = (typeof(skb->mac_header))~0U; skb->transport_header = (typeof(skb->transport_header))~0U; /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); atomic_set(&shinfo->dataref, 1); kmemcheck_annotate_variable(shinfo->destructor_arg); return skb; } EXPORT_SYMBOL(build_skb); struct netdev_alloc_cache { struct page_frag frag; /* we maintain a pagecount bias, so that we dont dirty cache line * containing page->_count every time we allocate a fragment. 
*/ unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) { struct netdev_alloc_cache *nc; void *data = NULL; int order; unsigned long flags; local_irq_save(flags); nc = &__get_cpu_var(netdev_alloc_cache); if (unlikely(!nc->frag.page)) { refill: for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { gfp_t gfp = gfp_mask; if (order) gfp |= __GFP_COMP | __GFP_NOWARN; nc->frag.page = alloc_pages(gfp, order); if (likely(nc->frag.page)) break; if (--order < 0) goto end; } nc->frag.size = PAGE_SIZE << order; recycle: atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS); nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; nc->frag.offset = 0; } if (nc->frag.offset + fragsz > nc->frag.size) { /* avoid unnecessary locked operations if possible */ if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) || atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count)) goto recycle; goto refill; } data = page_address(nc->frag.page) + nc->frag.offset; nc->frag.offset += fragsz; nc->pagecnt_bias--; end: local_irq_restore(flags); return data; } /** * netdev_alloc_frag - allocate a page fragment * @fragsz: fragment size * * Allocates a frag from a page for receive buffer. * Uses GFP_ATOMIC allocations. */ void *netdev_alloc_frag(unsigned int fragsz) { return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); } EXPORT_SYMBOL(netdev_alloc_frag); /** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on * @length: length to allocate * @gfp_mask: get_free_pages mask, passed to alloc_skb * * Allocate a new &sk_buff and assign it a usage count of one. The * buffer has unspecified headroom built in. Users should allocate * the headroom they think they need without accounting for the * built in space. The built in space is used for optimisations. * * %NULL is returned if there is no free memory. 
*/ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask) { struct sk_buff *skb = NULL; unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { void *data; if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; data = __netdev_alloc_frag(fragsz, gfp_mask); if (likely(data)) { skb = build_skb(data, fragsz); if (unlikely(!skb)) put_page(virt_to_head_page(data)); } } else { skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); } if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; } return skb; } EXPORT_SYMBOL(__netdev_alloc_skb); void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size, unsigned int truesize) { skb_fill_page_desc(skb, i, page, off, size); skb->len += size; skb->data_len += size; skb->truesize += truesize; } EXPORT_SYMBOL(skb_add_rx_frag); void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, unsigned int truesize) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_frag_size_add(frag, size); skb->len += size; skb->data_len += size; skb->truesize += truesize; } EXPORT_SYMBOL(skb_coalesce_rx_frag); static void skb_drop_list(struct sk_buff **listp) { kfree_skb_list(*listp); *listp = NULL; } static inline void skb_drop_fraglist(struct sk_buff *skb) { skb_drop_list(&skb_shinfo(skb)->frag_list); } static void skb_clone_fraglist(struct sk_buff *skb) { struct sk_buff *list; skb_walk_frags(skb, list) skb_get(list); } static void skb_free_head(struct sk_buff *skb) { if (skb->head_frag) put_page(virt_to_head_page(skb->head)); else kfree(skb->head); } static void skb_release_data(struct sk_buff *skb) { if (!skb->cloned || !atomic_sub_return(skb->nohdr ? 
(1 << SKB_DATAREF_SHIFT) + 1 : 1, &skb_shinfo(skb)->dataref)) { if (skb_shinfo(skb)->nr_frags) { int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_unref(skb, i); } /* * If skb buf is from userspace, we need to notify the caller * the lower device DMA has done; */ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { struct ubuf_info *uarg; uarg = skb_shinfo(skb)->destructor_arg; if (uarg->callback) uarg->callback(uarg, true); } if (skb_has_frag_list(skb)) skb_drop_fraglist(skb); skb_free_head(skb); } } /* * Free an skbuff by memory without cleaning the state. */ static void kfree_skbmem(struct sk_buff *skb) { struct sk_buff *other; atomic_t *fclone_ref; switch (skb->fclone) { case SKB_FCLONE_UNAVAILABLE: kmem_cache_free(skbuff_head_cache, skb); break; case SKB_FCLONE_ORIG: fclone_ref = (atomic_t *) (skb + 2); if (atomic_dec_and_test(fclone_ref)) kmem_cache_free(skbuff_fclone_cache, skb); break; case SKB_FCLONE_CLONE: fclone_ref = (atomic_t *) (skb + 1); other = skb - 1; /* The clone portion is available for * fast-cloning again. */ skb->fclone = SKB_FCLONE_UNAVAILABLE; if (atomic_dec_and_test(fclone_ref)) kmem_cache_free(skbuff_fclone_cache, other); break; } } static void skb_release_head_state(struct sk_buff *skb) { skb_dst_drop(skb); #ifdef CONFIG_XFRM secpath_put(skb->sp); #endif if (skb->destructor) { WARN_ON(in_irq()); skb->destructor(skb); } #if IS_ENABLED(CONFIG_NF_CONNTRACK) nf_conntrack_put(skb->nfct); #endif #ifdef CONFIG_BRIDGE_NETFILTER nf_bridge_put(skb->nf_bridge); #endif /* XXX: IS this still necessary? - JHS */ #ifdef CONFIG_NET_SCHED skb->tc_index = 0; #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = 0; #endif #endif } /* Free everything but the sk_buff shell. */ static void skb_release_all(struct sk_buff *skb) { skb_release_head_state(skb); if (likely(skb->head)) skb_release_data(skb); } /** * __kfree_skb - private function * @skb: buffer * * Free an sk_buff. Release anything attached to the buffer. * Clean the state. 
This is an internal helper function. Users should * always call kfree_skb */ void __kfree_skb(struct sk_buff *skb) { skb_release_all(skb); kfree_skbmem(skb); } EXPORT_SYMBOL(__kfree_skb); /** * kfree_skb - free an sk_buff * @skb: buffer to free * * Drop a reference to the buffer and free it if the usage count has * hit zero. */ void kfree_skb(struct sk_buff *skb) { if (unlikely(!skb)) return; if (likely(atomic_read(&skb->users) == 1)) smp_rmb(); else if (likely(!atomic_dec_and_test(&skb->users))) return; trace_kfree_skb(skb, __builtin_return_address(0)); __kfree_skb(skb); } EXPORT_SYMBOL(kfree_skb); void kfree_skb_list(struct sk_buff *segs) { while (segs) { struct sk_buff *next = segs->next; kfree_skb(segs); segs = next; } } EXPORT_SYMBOL(kfree_skb_list); /** * skb_tx_error - report an sk_buff xmit error * @skb: buffer that triggered an error * * Report xmit error if a device callback is tracking this skb. * skb must be freed afterwards. */ void skb_tx_error(struct sk_buff *skb) { if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { struct ubuf_info *uarg; uarg = skb_shinfo(skb)->destructor_arg; if (uarg->callback) uarg->callback(uarg, false); skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } } EXPORT_SYMBOL(skb_tx_error); /** * consume_skb - free an skbuff * @skb: buffer to free * * Drop a ref to the buffer and free it if the usage count has hit zero * Functions identically to kfree_skb, but kfree_skb assumes that the frame * is being dropped after a failure and notes that */ void consume_skb(struct sk_buff *skb) { if (unlikely(!skb)) return; if (likely(atomic_read(&skb->users) == 1)) smp_rmb(); else if (likely(!atomic_dec_and_test(&skb->users))) return; trace_consume_skb(skb); __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { new->tstamp = old->tstamp; new->dev = old->dev; new->transport_header = old->transport_header; new->network_header = old->network_header; new->mac_header = 
old->mac_header; new->inner_protocol = old->inner_protocol; new->inner_transport_header = old->inner_transport_header; new->inner_network_header = old->inner_network_header; new->inner_mac_header = old->inner_mac_header; skb_dst_copy(new, old); skb_copy_hash(new, old); new->ooo_okay = old->ooo_okay; new->no_fcs = old->no_fcs; new->encapsulation = old->encapsulation; #ifdef CONFIG_XFRM new->sp = secpath_get(old->sp); #endif memcpy(new->cb, old->cb, sizeof(old->cb)); new->csum = old->csum; new->local_df = old->local_df; new->pkt_type = old->pkt_type; new->ip_summed = old->ip_summed; skb_copy_queue_mapping(new, old); new->priority = old->priority; #if IS_ENABLED(CONFIG_IP_VS) new->ipvs_property = old->ipvs_property; #endif new->pfmemalloc = old->pfmemalloc; new->protocol = old->protocol; new->mark = old->mark; new->skb_iif = old->skb_iif; __nf_copy(new, old); #ifdef CONFIG_NET_SCHED new->tc_index = old->tc_index; #ifdef CONFIG_NET_CLS_ACT new->tc_verd = old->tc_verd; #endif #endif new->vlan_proto = old->vlan_proto; new->vlan_tci = old->vlan_tci; skb_copy_secmark(new, old); #ifdef CONFIG_NET_RX_BUSY_POLL new->napi_id = old->napi_id; #endif } /* * You should not add any new code to this function. Add it to * __copy_skb_header above instead. */ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) { #define C(x) n->x = skb->x n->next = n->prev = NULL; n->sk = NULL; __copy_skb_header(n, skb); C(len); C(data_len); C(mac_len); n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; n->cloned = 1; n->nohdr = 0; n->destructor = NULL; C(tail); C(end); C(head); C(head_frag); C(data); C(truesize); atomic_set(&n->users, 1); atomic_inc(&(skb_shinfo(skb)->dataref)); skb->cloned = 1; return n; #undef C } /** * skb_morph - morph one skb into another * @dst: the skb to receive the contents * @src: the skb to supply the contents * * This is identical to skb_clone except that the target skb is * supplied by the user. * * The target skb is returned upon exit. 
*/ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) { skb_release_all(dst); return __skb_clone(dst, src); } EXPORT_SYMBOL_GPL(skb_morph); /** * skb_copy_ubufs - copy userspace skb frags buffers to kernel * @skb: the skb to modify * @gfp_mask: allocation priority * * This must be called on SKBTX_DEV_ZEROCOPY skb. * It will copy all frags into kernel and drop the reference * to userspace pages. * * If this function is called from an interrupt gfp_mask() must be * %GFP_ATOMIC. * * Returns 0 on success or a negative error code on failure * to allocate kernel memory to copy to. */ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { int i; int num_frags = skb_shinfo(skb)->nr_frags; struct page *page, *head = NULL; struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; for (i = 0; i < num_frags; i++) { u8 *vaddr; skb_frag_t *f = &skb_shinfo(skb)->frags[i]; page = alloc_page(gfp_mask); if (!page) { while (head) { struct page *next = (struct page *)page_private(head); put_page(head); head = next; } return -ENOMEM; } vaddr = kmap_atomic(skb_frag_page(f)); memcpy(page_address(page), vaddr + f->page_offset, skb_frag_size(f)); kunmap_atomic(vaddr); set_page_private(page, (unsigned long)head); head = page; } /* skb frags release userspace buffers */ for (i = 0; i < num_frags; i++) skb_frag_unref(skb, i); uarg->callback(uarg, false); /* skb frags point to kernel buffers */ for (i = num_frags - 1; i >= 0; i--) { __skb_fill_page_desc(skb, i, head, 0, skb_shinfo(skb)->frags[i].size); head = (struct page *)page_private(head); } skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; return 0; } EXPORT_SYMBOL_GPL(skb_copy_ubufs); /** * skb_clone - duplicate an sk_buff * @skb: buffer to clone * @gfp_mask: allocation priority * * Duplicate an &sk_buff. The new one is not owned by a socket. Both * copies share the same packet data but not structure. The new * buffer has a reference count of 1. 
If the allocation fails the * function returns %NULL otherwise the new buffer is returned. * * If this function is called from an interrupt gfp_mask() must be * %GFP_ATOMIC. */ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *n; if (skb_orphan_frags(skb, gfp_mask)) return NULL; n = skb + 1; if (skb->fclone == SKB_FCLONE_ORIG && n->fclone == SKB_FCLONE_UNAVAILABLE) { atomic_t *fclone_ref = (atomic_t *) (n + 1); n->fclone = SKB_FCLONE_CLONE; atomic_inc(fclone_ref); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); if (!n) return NULL; kmemcheck_annotate_bitfield(n, flags1); kmemcheck_annotate_bitfield(n, flags2); n->fclone = SKB_FCLONE_UNAVAILABLE; } return __skb_clone(n, skb); } EXPORT_SYMBOL(skb_clone); static void skb_headers_offset_update(struct sk_buff *skb, int off) { /* Only adjust this if it actually is csum_start rather than csum */ if (skb->ip_summed == CHECKSUM_PARTIAL) skb->csum_start += off; /* {transport,network,mac}_header and tail are relative to skb->head */ skb->transport_header += off; skb->network_header += off; if (skb_mac_header_was_set(skb)) skb->mac_header += off; skb->inner_transport_header += off; skb->inner_network_header += off; skb->inner_mac_header += off; } static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { __copy_skb_header(new, old); skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } static inline int skb_alloc_rx_flag(const struct sk_buff *skb) { if (skb_pfmemalloc(skb)) return SKB_ALLOC_RX; return 0; } /** * skb_copy - create private copy of an sk_buff * @skb: buffer to copy * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and its data. This is used when the * caller wishes to modify the data and needs a private copy of the * data to alter. 
Returns %NULL on failure or the pointer to the buffer * on success. The returned buffer has a reference count of 1. * * As by-product this function converts non-linear &sk_buff to linear * one, so that &sk_buff becomes completely private and caller is allowed * to modify all the data of returned buffer. This means that this * function is not recommended for use in circumstances when only * header is going to be modified. Use pskb_copy() instead. */ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { int headerlen = skb_headroom(skb); unsigned int size = skb_end_offset(skb) + skb->data_len; struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE); if (!n) return NULL; /* Set the data pointer */ skb_reserve(n, headerlen); /* Set the tail pointer and length */ skb_put(n, skb->len); if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) BUG(); copy_skb_header(n, skb); return n; } EXPORT_SYMBOL(skb_copy); /** * __pskb_copy - create copy of an sk_buff with private head. * @skb: buffer to copy * @headroom: headroom of new skb * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and part of its data, located * in header. Fragmented data remain shared. This is used when * the caller wishes to modify only header of &sk_buff and needs * private copy of the header to alter. Returns %NULL on failure * or the pointer to the buffer on success. * The returned buffer has a reference count of 1. 
*/ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) { unsigned int size = skb_headlen(skb) + headroom; struct sk_buff *n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE); if (!n) goto out; /* Set the data pointer */ skb_reserve(n, headroom); /* Set the tail pointer and length */ skb_put(n, skb_headlen(skb)); /* Copy the bytes */ skb_copy_from_linear_data(skb, n->data, n->len); n->truesize += skb->data_len; n->data_len = skb->data_len; n->len = skb->len; if (skb_shinfo(skb)->nr_frags) { int i; if (skb_orphan_frags(skb, gfp_mask)) { kfree_skb(n); n = NULL; goto out; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; skb_frag_ref(skb, i); } skb_shinfo(n)->nr_frags = i; } if (skb_has_frag_list(skb)) { skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; skb_clone_fraglist(n); } copy_skb_header(n, skb); out: return n; } EXPORT_SYMBOL(__pskb_copy); /** * pskb_expand_head - reallocate header of &sk_buff * @skb: buffer to reallocate * @nhead: room to add at head * @ntail: room to add at tail * @gfp_mask: allocation priority * * Expands (or creates identical copy, if @nhead and @ntail are zero) * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have * reference count of 1. Returns zero in the case of success or error, * if expansion failed. In the last case, &sk_buff is not changed. * * All the pointers pointing into skb header may change and must be * reloaded after call to this function. 
*/ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) { int i; u8 *data; int size = nhead + skb_end_offset(skb) + ntail; long off; BUG_ON(nhead < 0); if (skb_shared(skb)) BUG(); size = SKB_DATA_ALIGN(size); if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), gfp_mask, NUMA_NO_NODE, NULL); if (!data) goto nodata; size = SKB_WITH_OVERHEAD(ksize(data)); /* Copy only real data... and, alas, header. This should be * optimized for the cases when header is void. */ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); memcpy((struct skb_shared_info *)(data + size), skb_shinfo(skb), offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); /* * if shinfo is shared we must drop the old head gracefully, but if it * is not we can just drop the old head and let the existing refcount * be since all we did is relocate the values */ if (skb_cloned(skb)) { /* copy this zero copy skb frags */ if (skb_orphan_frags(skb, gfp_mask)) goto nofrags; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_ref(skb, i); if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); skb_release_data(skb); } else { skb_free_head(skb); } off = (data + nhead) - skb->head; skb->head = data; skb->head_frag = 0; skb->data += off; #ifdef NET_SKBUFF_DATA_USES_OFFSET skb->end = size; off = nhead; #else skb->end = skb->head + size; #endif skb->tail += off; skb_headers_offset_update(skb, nhead); skb->cloned = 0; skb->hdr_len = 0; skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); return 0; nofrags: kfree(data); nodata: return -ENOMEM; } EXPORT_SYMBOL(pskb_expand_head); /* Make private copy of skb with writable head and some headroom */ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) { struct sk_buff *skb2; int delta = headroom - skb_headroom(skb); if (delta <= 0) skb2 = pskb_copy(skb, GFP_ATOMIC); else { skb2 = skb_clone(skb, GFP_ATOMIC); if 
(skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { kfree_skb(skb2); skb2 = NULL; } } return skb2; } EXPORT_SYMBOL(skb_realloc_headroom); /** * skb_copy_expand - copy and expand sk_buff * @skb: buffer to copy * @newheadroom: new free bytes at head * @newtailroom: new free bytes at tail * @gfp_mask: allocation priority * * Make a copy of both an &sk_buff and its data and while doing so * allocate additional space. * * This is used when the caller wishes to modify the data and needs a * private copy of the data to alter as well as more space for new fields. * Returns %NULL on failure or the pointer to the buffer * on success. The returned buffer has a reference count of 1. * * You must pass %GFP_ATOMIC as the allocation priority if this function * is called from an interrupt. */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) { /* * Allocate the copy buffer */ struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE); int oldheadroom = skb_headroom(skb); int head_copy_len, head_copy_off; if (!n) return NULL; skb_reserve(n, newheadroom); /* Set the tail pointer and length */ skb_put(n, skb->len); head_copy_len = oldheadroom; head_copy_off = 0; if (newheadroom <= head_copy_len) head_copy_len = newheadroom; else head_copy_off = newheadroom - head_copy_len; /* Copy the linear header and data. */ if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, skb->len + head_copy_len)) BUG(); copy_skb_header(n, skb); skb_headers_offset_update(n, newheadroom - oldheadroom); return n; } EXPORT_SYMBOL(skb_copy_expand); /** * skb_pad - zero pad the tail of an skb * @skb: buffer to pad * @pad: space to pad * * Ensure that a buffer is followed by a padding area that is zero * filled. Used by network drivers which may DMA or transfer data * beyond the buffer end onto the wire. * * May return error in out of memory cases. 
The skb is freed on error. */ int skb_pad(struct sk_buff *skb, int pad) { int err; int ntail; /* If the skbuff is non linear tailroom is always zero.. */ if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { memset(skb->data+skb->len, 0, pad); return 0; } ntail = skb->data_len + pad - (skb->end - skb->tail); if (likely(skb_cloned(skb) || ntail > 0)) { err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); if (unlikely(err)) goto free_skb; } /* FIXME: The use of this function with non-linear skb's really needs * to be audited. */ err = skb_linearize(skb); if (unlikely(err)) goto free_skb; memset(skb->data + skb->len, 0, pad); return 0; free_skb: kfree_skb(skb); return err; } EXPORT_SYMBOL(skb_pad); /** * pskb_put - add data to the tail of a potentially fragmented buffer * @skb: start of the buffer to use * @tail: tail fragment of the buffer to use * @len: amount of data to add * * This function extends the used data area of the potentially * fragmented buffer. @tail must be the last fragment of @skb -- or * @skb itself. If this would exceed the total buffer size the kernel * will panic. A pointer to the first byte of the extra data is * returned. */ unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) { if (tail != skb) { skb->data_len += len; skb->len += len; } return skb_put(tail, len); } EXPORT_SYMBOL_GPL(pskb_put); /** * skb_put - add data to a buffer * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer. If this would * exceed the total buffer size the kernel will panic. A pointer to the * first byte of the extra data is returned. 
*/ unsigned char *skb_put(struct sk_buff *skb, unsigned int len) { unsigned char *tmp = skb_tail_pointer(skb); SKB_LINEAR_ASSERT(skb); skb->tail += len; skb->len += len; if (unlikely(skb->tail > skb->end)) skb_over_panic(skb, len, __builtin_return_address(0)); return tmp; } EXPORT_SYMBOL(skb_put); /** * skb_push - add data to the start of a buffer * @skb: buffer to use * @len: amount of data to add * * This function extends the used data area of the buffer at the buffer * start. If this would exceed the total buffer headroom the kernel will * panic. A pointer to the first byte of the extra data is returned. */ unsigned char *skb_push(struct sk_buff *skb, unsigned int len) { skb->data -= len; skb->len += len; if (unlikely(skb->data<skb->head)) skb_under_panic(skb, len, __builtin_return_address(0)); return skb->data; } EXPORT_SYMBOL(skb_push); /** * skb_pull - remove data from the start of a buffer * @skb: buffer to use * @len: amount of data to remove * * This function removes data from the start of a buffer, returning * the memory to the headroom. A pointer to the next data in the buffer * is returned. Once the data has been pulled future pushes will overwrite * the old data. */ unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) { return skb_pull_inline(skb, len); } EXPORT_SYMBOL(skb_pull); /** * skb_trim - remove end from a buffer * @skb: buffer to alter * @len: new length * * Cut the length of a buffer down by removing data from the tail. If * the buffer is already under the length specified it is not modified. * The skb must be linear. */ void skb_trim(struct sk_buff *skb, unsigned int len) { if (skb->len > len) __skb_trim(skb, len); } EXPORT_SYMBOL(skb_trim); /* Trims skb to length len. It can change skb pointers. 
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	/* A cloned skb shares its data area; privatize it first so we
	 * may drop frags and adjust lengths. */
	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	/* Trim point inside the linear header: drop ALL paged frags. */
	if (offset >= len)
		goto drop_pages;

	/* Walk the page frags until we pass the trim point; the frag
	 * containing the cut is resized, everything after it is freed
	 * (note the goto from above jumps into this loop body). */
	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		/* Release page references of the dropped frags. */
		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		/* Anything on the frag list lies past the frags, so it
		 * is past the trim point too. */
		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	/* Trim point lies inside the frag list: walk it, cloning any
	 * shared member before we may modify it in place. */
	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		/* This member straddles the cut: trim it recursively. */
		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		/* Everything after the cut member is dropped. */
		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	/* Fix up len/data_len; if the cut fell inside the linear part,
	 * the tail pointer moves as well. */
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
*/ /* Moves tail of skb head forward, copying data from fragmented part, * when it is necessary. * 1. It may fail due to malloc failure. * 2. It may change skb pointers. * * It is pretty complicated. Luckily, it is called only in exceptional cases. */ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) { /* If skb has not enough free space at tail, get new one * plus 128 bytes for future expansions. If we have enough * room at tail, reallocate without expansion only if skb is cloned. */ int i, k, eat = (skb->tail + delta) - skb->end; if (eat > 0 || skb_cloned(skb)) { if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, GFP_ATOMIC)) return NULL; } if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) BUG(); /* Optimization: no fragments, no reasons to preestimate * size of pulled pages. Superb. */ if (!skb_has_frag_list(skb)) goto pull_pages; /* Estimate size of pulled pages. */ eat = delta; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (size >= eat) goto pull_pages; eat -= size; } /* If we need update frag list, we are in troubles. * Certainly, it possible to add an offset to skb data, * but taking into account that pulling is expected to * be very rare operation, it is worth to fight against * further bloating skb head and crucify ourselves here instead. * Pure masohism, indeed. 8)8) */ if (eat) { struct sk_buff *list = skb_shinfo(skb)->frag_list; struct sk_buff *clone = NULL; struct sk_buff *insp = NULL; do { BUG_ON(!list); if (list->len <= eat) { /* Eaten as whole. */ eat -= list->len; list = list->next; insp = list; } else { /* Eaten partially. */ if (skb_shared(list)) { /* Sucks! We need to fork list. :-( */ clone = skb_clone(list, GFP_ATOMIC); if (!clone) return NULL; insp = list->next; list = clone; } else { /* This may be pulled without * problems. 
*/ insp = list; } if (!pskb_pull(list, eat)) { kfree_skb(clone); return NULL; } break; } } while (eat); /* Free pulled out fragments. */ while ((list = skb_shinfo(skb)->frag_list) != insp) { skb_shinfo(skb)->frag_list = list->next; kfree_skb(list); } /* And insert new clone at head. */ if (clone) { clone->next = list; skb_shinfo(skb)->frag_list = clone; } } /* Success! Now we may commit changes to skb data. */ pull_pages: eat = delta; k = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (size <= eat) { skb_frag_unref(skb, i); eat -= size; } else { skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; if (eat) { skb_shinfo(skb)->frags[k].page_offset += eat; skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); eat = 0; } k++; } } skb_shinfo(skb)->nr_frags = k; skb->tail += delta; skb->data_len -= delta; return skb_tail_pointer(skb); } EXPORT_SYMBOL(__pskb_pull_tail); /** * skb_copy_bits - copy bits from skb to kernel buffer * @skb: source skb * @offset: offset in source * @to: destination buffer * @len: number of bytes to copy * * Copy the specified number of bytes from the source skb to the * destination buffer. * * CAUTION ! : * If its prototype is ever changed, * check arch/{*}/net/{*}.S files, * since it is called from BPF assembly code. */ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) { int start = skb_headlen(skb); struct sk_buff *frag_iter; int i, copy; if (offset > (int)skb->len - len) goto fault; /* Copy header. 
*/ if ((copy = start - offset) > 0) { if (copy > len) copy = len; skb_copy_from_linear_data_offset(skb, offset, to, copy); if ((len -= copy) == 0) return 0; offset += copy; to += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; skb_frag_t *f = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(f); if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(f)); memcpy(to, vaddr + f->page_offset + offset - start, copy); kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; offset += copy; to += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; if (skb_copy_bits(frag_iter, offset - start, to, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; to += copy; } start = end; } if (!len) return 0; fault: return -EFAULT; } EXPORT_SYMBOL(skb_copy_bits); /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
*/ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) { put_page(spd->pages[i]); } static struct page *linear_to_page(struct page *page, unsigned int *len, unsigned int *offset, struct sock *sk) { struct page_frag *pfrag = sk_page_frag(sk); if (!sk_page_frag_refill(sk, pfrag)) return NULL; *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); memcpy(page_address(pfrag->page) + pfrag->offset, page_address(page) + *offset, *len); *offset = pfrag->offset; pfrag->offset += *len; return pfrag->page; } static bool spd_can_coalesce(const struct splice_pipe_desc *spd, struct page *page, unsigned int offset) { return spd->nr_pages && spd->pages[spd->nr_pages - 1] == page && (spd->partial[spd->nr_pages - 1].offset + spd->partial[spd->nr_pages - 1].len == offset); } /* * Fill page/offset/length into spd, if it can hold more pages. */ static bool spd_fill_page(struct splice_pipe_desc *spd, struct pipe_inode_info *pipe, struct page *page, unsigned int *len, unsigned int offset, bool linear, struct sock *sk) { if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) return true; if (linear) { page = linear_to_page(page, len, &offset, sk); if (!page) return true; } if (spd_can_coalesce(spd, page, offset)) { spd->partial[spd->nr_pages - 1].len += *len; return false; } get_page(page); spd->pages[spd->nr_pages] = page; spd->partial[spd->nr_pages].len = *len; spd->partial[spd->nr_pages].offset = offset; spd->nr_pages++; return false; } static bool __splice_segment(struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct splice_pipe_desc *spd, bool linear, struct sock *sk, struct pipe_inode_info *pipe) { if (!*len) return true; /* skip this segment if already processed */ if (*off >= plen) { *off -= plen; return false; } /* ignore any bits we already processed */ poff += *off; plen -= *off; *off = 0; do { unsigned int flen = min(*len, plen); if (spd_fill_page(spd, pipe, page, &flen, poff, linear, sk)) return true; poff 
+= flen; plen -= flen; *len -= flen; } while (*len && plen); return false; } /* * Map linear and fragment data from the skb to spd. It reports true if the * pipe is full or if we already spliced the requested length. */ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, unsigned int *offset, unsigned int *len, struct splice_pipe_desc *spd, struct sock *sk) { int seg; /* map the linear part : * If skb->head_frag is set, this 'linear' part is backed by a * fragment, and if the head is not shared with any clones then * we can avoid a copy since we own the head portion of this page. */ if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), offset, len, spd, skb_head_is_locked(skb), sk, pipe)) return true; /* * then map the fragments */ for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; if (__splice_segment(skb_frag_page(f), f->page_offset, skb_frag_size(f), offset, len, spd, false, sk, pipe)) return true; } return false; } /* * Map data from the skb to a pipe. Should handle both the linear part, * the fragments, and the frag list. It does NOT handle frag lists within * the frag list, if such a thing exists. We'd probably need to recurse to * handle that cleanly. */ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) { struct partial_page partial[MAX_SKB_FRAGS]; struct page *pages[MAX_SKB_FRAGS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial, .nr_pages_max = MAX_SKB_FRAGS, .flags = flags, .ops = &nosteal_pipe_buf_ops, .spd_release = sock_spd_release, }; struct sk_buff *frag_iter; struct sock *sk = skb->sk; int ret = 0; /* * __skb_splice_bits() only fails if the output has no room left, * so no point in going over the frag_list for the error case. 
*/ if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) goto done; else if (!tlen) goto done; /* * now see if we have a frag_list to map */ skb_walk_frags(skb, frag_iter) { if (!tlen) break; if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) break; } done: if (spd.nr_pages) { /* * Drop the socket lock, otherwise we have reverse * locking dependencies between sk_lock and i_mutex * here as compared to sendfile(). We enter here * with the socket lock held, and splice_to_pipe() will * grab the pipe inode lock. For sendfile() emulation, * we call into ->sendpage() with the i_mutex lock held * and networking will grab the socket lock. */ release_sock(sk); ret = splice_to_pipe(pipe, &spd); lock_sock(sk); } return ret; } /** * skb_store_bits - store bits from kernel buffer to skb * @skb: destination buffer * @offset: offset in destination * @from: source buffer * @len: number of bytes to copy * * Copy the specified number of bytes from the source buffer to the * destination skb. This function handles all the messy bits of * traversing fragment lists and such. 
*/ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) { int start = skb_headlen(skb); struct sk_buff *frag_iter; int i, copy; if (offset > (int)skb->len - len) goto fault; if ((copy = start - offset) > 0) { if (copy > len) copy = len; skb_copy_to_linear_data_offset(skb, offset, from, copy); if ((len -= copy) == 0) return 0; offset += copy; from += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int end; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(frag)); memcpy(vaddr + frag->page_offset + offset - start, from, copy); kunmap_atomic(vaddr); if ((len -= copy) == 0) return 0; offset += copy; from += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; if (skb_store_bits(frag_iter, offset - start, from, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; from += copy; } start = end; } if (!len) return 0; fault: return -EFAULT; } EXPORT_SYMBOL(skb_store_bits); /* Checksum skb data. */ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum, const struct skb_checksum_ops *ops) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int pos = 0; /* Checksum header. 
*/ if (copy > 0) { if (copy > len) copy = len; csum = ops->update(skb->data + offset, copy, csum); if ((len -= copy) == 0) return csum; offset += copy; pos = copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; WARN_ON(start > offset + len); end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { __wsum csum2; u8 *vaddr; if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = ops->update(vaddr + frag->page_offset + offset - start, copy, 0); kunmap_atomic(vaddr); csum = ops->combine(csum, csum2, pos, copy); if (!(len -= copy)) return csum; offset += copy; pos += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { __wsum csum2; if (copy > len) copy = len; csum2 = __skb_checksum(frag_iter, offset - start, copy, 0, ops); csum = ops->combine(csum, csum2, pos, copy); if ((len -= copy) == 0) return csum; offset += copy; pos += copy; } start = end; } BUG_ON(len); return csum; } EXPORT_SYMBOL(__skb_checksum); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) { const struct skb_checksum_ops ops = { .update = csum_partial_ext, .combine = csum_block_add_ext, }; return __skb_checksum(skb, offset, len, csum, &ops); } EXPORT_SYMBOL(skb_checksum); /* Both of above in one bottle. */ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int pos = 0; /* Copy header. 
*/ if (copy > 0) { if (copy > len) copy = len; csum = csum_partial_copy_nocheck(skb->data + offset, to, copy, csum); if ((len -= copy) == 0) return csum; offset += copy; to += copy; pos = copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; WARN_ON(start > offset + len); end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { __wsum csum2; u8 *vaddr; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; vaddr = kmap_atomic(skb_frag_page(frag)); csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset + offset - start, to, copy, 0); kunmap_atomic(vaddr); csum = csum_block_add(csum, csum2, pos); if (!(len -= copy)) return csum; offset += copy; to += copy; pos += copy; } start = end; } skb_walk_frags(skb, frag_iter) { __wsum csum2; int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; csum2 = skb_copy_and_csum_bits(frag_iter, offset - start, to, copy, 0); csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; to += copy; pos += copy; } start = end; } BUG_ON(len); return csum; } EXPORT_SYMBOL(skb_copy_and_csum_bits); /** * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() * @from: source buffer * * Calculates the amount of linear headroom needed in the 'to' skb passed * into skb_zerocopy(). 
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	/* Need linear headroom when the head cannot be referenced as a
	 * page fragment (not head_frag), is too small to be worth it,
	 * or the destination would run out of frag slots. */
	if (!from->head_frag ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	/* Frag lists cannot be zero-copied; plan to copy everything. */
	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);

/**
 * skb_zerocopy - Zero copy skb to skb
 * @to: destination buffer
 * @from: source buffer
 * @len: number of bytes to copy from source buffer
 * @hlen: size of linear headroom in destination buffer
 *
 * Copies up to `len` bytes from `from` to `to` by creating references
 * to the frags in the source buffer.
 *
 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
 * headroom in the `to` buffer.
 *
 * NOTE(review): the skb_copy_bits() return values below are ignored,
 * and the source's zero-copy (userspace) frags are referenced without
 * a prior skb_orphan_frags() — upstream later reworked this function
 * to return an error code for exactly these reasons; fixing it here
 * would change the prototype declared in the header. TODO confirm and
 * backport the upstream fix.
 */
void
skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	struct page *page;
	unsigned int offset;

	BUG_ON(!from->head_frag && !hlen);

	/* dont bother with small payloads */
	if (len <= skb_tailroom(to)) {
		skb_copy_bits(from, 0, skb_put(to, len), len);
		return;
	}

	if (hlen) {
		/* Copy the linear headroom portion. */
		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		len -= hlen;
	} else {
		/* Reference the source head as frag 0 of @to instead of
		 * copying it. */
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	/* Share the remaining payload by referencing source frags. */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int,
						      skb_shinfo(to)->frags[j].size,
						      len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);

/* Copy an skb's data into @to while computing the checksum of the
 * payload past the checksum start; for CHECKSUM_PARTIAL buffers the
 * folded checksum is stored at the device's csum_offset. */
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	/* Bytes before the checksum start are copied verbatim. */
	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	/* skb_dequeue() takes the list lock for each removal, so this is
	 * safe against concurrent queue users. */
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list.
The list locks are taken * and this function is atomic with respect to other list locked calls. * A buffer cannot be placed on two lists at the same time. */ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_queue_after(list, old, newsk); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_append); /** * skb_insert - insert a buffer * @old: buffer to insert before * @newsk: buffer to insert * @list: list to use * * Place a packet before a given packet in a list. The list locks are * taken and this function is atomic with respect to other list locked * calls. * * A buffer cannot be placed on two lists at the same time. */ void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) { unsigned long flags; spin_lock_irqsave(&list->lock, flags); __skb_insert(newsk, old->prev, old, list); spin_unlock_irqrestore(&list->lock, flags); } EXPORT_SYMBOL(skb_insert); static inline void skb_split_inside_header(struct sk_buff *skb, struct sk_buff* skb1, const u32 len, const int pos) { int i; skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), pos - len); /* And move data appendix as is. 
*/ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; skb_shinfo(skb)->nr_frags = 0; skb1->data_len = skb->data_len; skb1->len += skb1->data_len; skb->data_len = 0; skb->len = len; skb_set_tail_pointer(skb, len); } static inline void skb_split_no_header(struct sk_buff *skb, struct sk_buff* skb1, const u32 len, int pos) { int i, k = 0; const int nfrags = skb_shinfo(skb)->nr_frags; skb_shinfo(skb)->nr_frags = 0; skb1->len = skb1->data_len = skb->len - len; skb->len = len; skb->data_len = len - pos; for (i = 0; i < nfrags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (pos + size > len) { skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; if (pos < len) { /* Split frag. * We have two variants in this case: * 1. Move all the frag to the second * part, if it is possible. F.e. * this approach is mandatory for TUX, * where splitting is expensive. * 2. Split is accurately. We make this. */ skb_frag_ref(skb, i); skb_shinfo(skb1)->frags[0].page_offset += len - pos; skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); skb_shinfo(skb)->nr_frags++; } k++; } else skb_shinfo(skb)->nr_frags++; pos += size; } skb_shinfo(skb1)->nr_frags = k; } /** * skb_split - Split fragmented skb to two parts at length len. * @skb: the buffer to split * @skb1: the buffer to receive the second part * @len: new length for skb */ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) { int pos = skb_headlen(skb); skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; if (len < pos) /* Split line is inside header. */ skb_split_inside_header(skb, skb1, len, pos); else /* Second chunk has no header, nothing to copy. */ skb_split_no_header(skb, skb1, len, pos); } EXPORT_SYMBOL(skb_split); /* Shifting from/to a cloned skb is a no-go. 
* * Caller cannot keep skb_shinfo related pointers past calling here! */ static int skb_prepare_for_shift(struct sk_buff *skb) { return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } /** * skb_shift - Shifts paged data partially from skb to another * @tgt: buffer into which tail data gets added * @skb: buffer from which the paged data comes from * @shiftlen: shift up to this many bytes * * Attempts to shift up to shiftlen worth of bytes, which may be less than * the length of the skb, from skb to tgt. Returns number bytes shifted. * It's up to caller to free skb if everything was shifted. * * If @tgt runs out of frags, the whole operation is aborted. * * Skb cannot include anything else but paged data while tgt is allowed * to have non-paged data as well. * * TODO: full sized shift could be optimized but that would need * specialized skb free'er to handle frags without up-to-date nr_frags. */ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) { int from, to, merge, todo; struct skb_frag_struct *fragfrom, *fragto; BUG_ON(shiftlen > skb->len); BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ todo = shiftlen; from = 0; to = skb_shinfo(tgt)->nr_frags; fragfrom = &skb_shinfo(skb)->frags[from]; /* Actual merge is delayed until the point when we know we can * commit all, so that we don't have to undo partial changes */ if (!to || !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), fragfrom->page_offset)) { merge = -1; } else { merge = to - 1; todo -= skb_frag_size(fragfrom); if (todo < 0) { if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) return 0; /* All previous frag pointers might be stale! 
*/ fragfrom = &skb_shinfo(skb)->frags[from]; fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, shiftlen); skb_frag_size_sub(fragfrom, shiftlen); fragfrom->page_offset += shiftlen; goto onlymerged; } from++; } /* Skip full, not-fitting skb to avoid expensive operations */ if ((shiftlen == skb->len) && (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) return 0; if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) return 0; while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { if (to == MAX_SKB_FRAGS) return 0; fragfrom = &skb_shinfo(skb)->frags[from]; fragto = &skb_shinfo(tgt)->frags[to]; if (todo >= skb_frag_size(fragfrom)) { *fragto = *fragfrom; todo -= skb_frag_size(fragfrom); from++; to++; } else { __skb_frag_ref(fragfrom); fragto->page = fragfrom->page; fragto->page_offset = fragfrom->page_offset; skb_frag_size_set(fragto, todo); fragfrom->page_offset += todo; skb_frag_size_sub(fragfrom, todo); todo = 0; to++; break; } } /* Ready to "commit" this state change to tgt */ skb_shinfo(tgt)->nr_frags = to; if (merge >= 0) { fragfrom = &skb_shinfo(skb)->frags[0]; fragto = &skb_shinfo(tgt)->frags[merge]; skb_frag_size_add(fragto, skb_frag_size(fragfrom)); __skb_frag_unref(fragfrom); } /* Reposition in the original skb */ to = 0; while (from < skb_shinfo(skb)->nr_frags) skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; skb_shinfo(skb)->nr_frags = to; BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); onlymerged: /* Most likely the tgt won't ever need its checksum anymore, skb on * the other hand might need it if it needs to be resent */ tgt->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL; /* Yak, is it really working this way? Some helper please? 
*/ skb->len -= shiftlen; skb->data_len -= shiftlen; skb->truesize -= shiftlen; tgt->len += shiftlen; tgt->data_len += shiftlen; tgt->truesize += shiftlen; return shiftlen; } /** * skb_prepare_seq_read - Prepare a sequential read of skb data * @skb: the buffer to read * @from: lower offset of data to be read * @to: upper offset of data to be read * @st: state variable * * Initializes the specified state variable. Must be called before * invoking skb_seq_read() for the first time. */ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, unsigned int to, struct skb_seq_state *st) { st->lower_offset = from; st->upper_offset = to; st->root_skb = st->cur_skb = skb; st->frag_idx = st->stepped_offset = 0; st->frag_data = NULL; } EXPORT_SYMBOL(skb_prepare_seq_read); /** * skb_seq_read - Sequentially read skb data * @consumed: number of bytes consumed by the caller so far * @data: destination pointer for data to be returned * @st: state variable * * Reads a block of skb data at @consumed relative to the * lower offset specified to skb_prepare_seq_read(). Assigns * the head of the data block to @data and returns the length * of the block or 0 if the end of the skb data or the upper * offset has been reached. * * The caller is not required to consume all of the data * returned, i.e. @consumed is typically set to the number * of bytes already consumed and the next call to * skb_seq_read() will return the remaining part of the block. * * Note 1: The size of each block of data returned can be arbitrary, * this limitation is the cost for zerocopy seqeuental * reads of potentially non linear data. * * Note 2: Fragment lists within fragments are not implemented * at the moment, state->root_skb could be replaced with * a stack for this purpose. 
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	/* Past the requested window: release any mapped frag and stop. */
	if (unlikely(abs_offset >= st->upper_offset)) {
		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}
		return 0;
	}

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	/* Offset still within the current skb's linear data. */
	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	/* Entering the frag area of this skb for the first time:
	 * account its linear head in the running offset.
	 */
	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = skb_frag_size(frag) + st->stepped_offset;

		if (abs_offset < block_limit) {
			/* Map the frag page on demand; the mapping is kept
			 * in st->frag_data until the read moves past it.
			 */
			if (!st->frag_data)
				st->frag_data = kmap_atomic(skb_frag_page(frag));

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		/* Done with this frag: drop its atomic mapping. */
		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += skb_frag_size(frag);
	}

	if (st->frag_data) {
		kunmap_atomic(st->frag_data);
		st->frag_data = NULL;
	}

	/* Advance into the frag_list (only one level deep is supported;
	 * see Note 2 in the kernel-doc above).
	 */
	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	/* Drop the atomic frag mapping left open by skb_seq_read(). */
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

/* The skb_seq_state is stashed inside the textsearch state's cb area. */
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

/* textsearch get_next_block callback: feed skb data blocks sequentially. */
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

/* textsearch finish callback: tear down the sequential read state. */
static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	/* An offset beyond the search window means "not found". */
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data in the fragment part
 * of the skb; if any page alloc fails this procedure returns -ENOMEM
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = skb_shinfo(skb)->nr_frags;
	int copy;
	int offset = 0;
	int ret;
	/* Per-task page fragment allocator used as the backing store. */
	struct page_frag *pfrag = &current->task_frag;

	do {
		/* Return error if we don't have space for new frag */
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EMSGSIZE;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		/* copy the user data to page; copy at most what fits in the
		 * remainder of the current page fragment.
		 */
		copy = min_t(int, length, pfrag->size - pfrag->offset);

		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
				   copy);
		frg_cnt++;
		pfrag->offset += copy;
		/* Extra page ref for the frag just attached to the skb. */
		get_page(pfrag->page);

		skb->truesize += copy;
		atomic_add(copy, &sk->sk_wmem_alloc);
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum.  It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
*/ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { BUG_ON(len > skb->len); skb->len -= len; BUG_ON(skb->len < skb->data_len); skb_postpull_rcsum(skb, skb->data, len); return skb->data += len; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); /** * skb_segment - Perform protocol segmentation on skb. * @head_skb: buffer to segment * @features: features for the output path (see dev->features) * * This function performs segmentation on the given skb. It returns * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). */ struct sk_buff *skb_segment(struct sk_buff *head_skb, netdev_features_t features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; skb_frag_t *frag = skb_shinfo(head_skb)->frags; unsigned int mss = skb_shinfo(head_skb)->gso_size; unsigned int doffset = head_skb->data - skb_mac_header(head_skb); unsigned int offset = doffset; unsigned int tnl_hlen = skb_tnl_header_len(head_skb); unsigned int headroom; unsigned int len; __be16 proto; bool csum; int sg = !!(features & NETIF_F_SG); int nfrags = skb_shinfo(head_skb)->nr_frags; int err = -ENOMEM; int i = 0; int pos; proto = skb_network_protocol(head_skb); if (unlikely(!proto)) return ERR_PTR(-EINVAL); csum = !!can_checksum_protocol(features, proto); __skb_push(head_skb, doffset); headroom = skb_headroom(head_skb); pos = skb_headlen(head_skb); do { struct sk_buff *nskb; skb_frag_t *nskb_frag; int hsize; int size; len = head_skb->len - offset; if (len > mss) len = mss; hsize = skb_headlen(head_skb) - offset; if (hsize < 0) hsize = 0; if (hsize > len || !sg) hsize = len; if (!hsize && i >= nfrags && skb_headlen(list_skb) && (skb_headlen(list_skb) == len || sg)) { BUG_ON(skb_headlen(list_skb) > len); i = 0; nfrags = skb_shinfo(list_skb)->nr_frags; frag = skb_shinfo(list_skb)->frags; pos += skb_headlen(list_skb); while (pos < offset + len) { BUG_ON(i >= nfrags); size = 
skb_frag_size(frag); if (pos + size > offset + len) break; i++; pos += size; frag++; } nskb = skb_clone(list_skb, GFP_ATOMIC); list_skb = list_skb->next; if (unlikely(!nskb)) goto err; if (unlikely(pskb_trim(nskb, len))) { kfree_skb(nskb); goto err; } hsize = skb_end_offset(nskb); if (skb_cow_head(nskb, doffset + headroom)) { kfree_skb(nskb); goto err; } nskb->truesize += skb_end_offset(nskb) - hsize; skb_release_head_state(nskb); __skb_push(nskb, doffset); } else { nskb = __alloc_skb(hsize + doffset + headroom, GFP_ATOMIC, skb_alloc_rx_flag(head_skb), NUMA_NO_NODE); if (unlikely(!nskb)) goto err; skb_reserve(nskb, headroom); __skb_put(nskb, doffset); } if (segs) tail->next = nskb; else segs = nskb; tail = nskb; __copy_skb_header(nskb, head_skb); nskb->mac_len = head_skb->mac_len; skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, nskb->data - tnl_hlen, doffset + tnl_hlen); if (nskb->len == len + doffset) goto perform_csum_check; if (!sg) { nskb->ip_summed = CHECKSUM_NONE; nskb->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), len, 0); continue; } nskb_frag = skb_shinfo(nskb)->frags; skb_copy_from_linear_data_offset(head_skb, offset, skb_put(nskb, hsize), hsize); skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & SKBTX_SHARED_FRAG; while (pos < offset + len) { if (i >= nfrags) { BUG_ON(skb_headlen(list_skb)); i = 0; nfrags = skb_shinfo(list_skb)->nr_frags; frag = skb_shinfo(list_skb)->frags; BUG_ON(!nfrags); list_skb = list_skb->next; } if (unlikely(skb_shinfo(nskb)->nr_frags >= MAX_SKB_FRAGS)) { net_warn_ratelimited( "skb_segment: too many frags: %u %u\n", pos, mss); goto err; } *nskb_frag = *frag; __skb_frag_ref(nskb_frag); size = skb_frag_size(nskb_frag); if (pos < offset) { nskb_frag->page_offset += offset - pos; skb_frag_size_sub(nskb_frag, offset - pos); } skb_shinfo(nskb)->nr_frags++; if (pos + size <= offset + len) { i++; frag++; pos += size; } else { 
skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); goto skip_fraglist; } nskb_frag++; } skip_fraglist: nskb->data_len = len - hsize; nskb->len += nskb->data_len; nskb->truesize += nskb->data_len; perform_csum_check: if (!csum) { nskb->csum = skb_checksum(nskb, doffset, nskb->len - doffset, 0); nskb->ip_summed = CHECKSUM_NONE; } } while ((offset += len) < head_skb->len); return segs; err: kfree_skb_list(segs); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(skb_segment); int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); unsigned int offset = skb_gro_offset(skb); unsigned int headlen = skb_headlen(skb); struct sk_buff *nskb, *lp, *p = *head; unsigned int len = skb_gro_len(skb); unsigned int delta_truesize; unsigned int headroom; if (unlikely(p->len + len >= 65536)) return -E2BIG; lp = NAPI_GRO_CB(p)->last ?: p; pinfo = skb_shinfo(lp); if (headlen <= offset) { skb_frag_t *frag; skb_frag_t *frag2; int i = skbinfo->nr_frags; int nr_frags = pinfo->nr_frags + i; if (nr_frags > MAX_SKB_FRAGS) goto merge; offset -= headlen; pinfo->nr_frags = nr_frags; skbinfo->nr_frags = 0; frag = pinfo->frags + nr_frags; frag2 = skbinfo->frags + i; do { *--frag = *--frag2; } while (--i); frag->page_offset += offset; skb_frag_size_sub(frag, offset); /* all fragments truesize : remove (head size + sk_buff) */ delta_truesize = skb->truesize - SKB_TRUESIZE(skb_end_offset(skb)); skb->truesize -= skb->data_len; skb->len -= skb->data_len; skb->data_len = 0; NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; goto done; } else if (skb->head_frag) { int nr_frags = pinfo->nr_frags; skb_frag_t *frag = pinfo->frags + nr_frags; struct page *page = virt_to_head_page(skb->head); unsigned int first_size = headlen - offset; unsigned int first_offset; if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) goto merge; first_offset = skb->data - (unsigned char *)page_address(page) + offset; pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 
frag->page.p = page; frag->page_offset = first_offset; skb_frag_size_set(frag, first_size); memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); /* We dont need to clear skbinfo->nr_frags here */ delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; goto done; } if (pinfo->frag_list) goto merge; if (skb_gro_len(p) != pinfo->gso_size) return -E2BIG; headroom = skb_headroom(p); nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); if (unlikely(!nskb)) return -ENOMEM; __copy_skb_header(nskb, p); nskb->mac_len = p->mac_len; skb_reserve(nskb, headroom); __skb_put(nskb, skb_gro_offset(p)); skb_set_mac_header(nskb, skb_mac_header(p) - p->data); skb_set_network_header(nskb, skb_network_offset(p)); skb_set_transport_header(nskb, skb_transport_offset(p)); __skb_pull(p, skb_gro_offset(p)); memcpy(skb_mac_header(nskb), skb_mac_header(p), p->data - skb_mac_header(p)); skb_shinfo(nskb)->frag_list = p; skb_shinfo(nskb)->gso_size = pinfo->gso_size; pinfo->gso_size = 0; skb_header_release(p); NAPI_GRO_CB(nskb)->last = p; nskb->data_len += p->len; nskb->truesize += p->truesize; nskb->len += p->len; *head = nskb; nskb->next = p->next; p->next = NULL; p = nskb; merge: delta_truesize = skb->truesize; if (offset > headlen) { unsigned int eat = offset - headlen; skbinfo->frags[0].page_offset += eat; skb_frag_size_sub(&skbinfo->frags[0], eat); skb->data_len -= eat; skb->len -= eat; offset = headlen; } __skb_pull(skb, offset); if (!NAPI_GRO_CB(p)->last) skb_shinfo(p)->frag_list = skb; else NAPI_GRO_CB(p)->last->next = skb; NAPI_GRO_CB(p)->last = skb; skb_header_release(skb); lp = p; done: NAPI_GRO_CB(p)->count++; p->data_len += len; p->truesize += delta_truesize; p->len += len; if (lp != p) { lp->data_len += len; lp->truesize += delta_truesize; lp->len += len; } NAPI_GRO_CB(skb)->same_flow = 1; return 0; } EXPORT_SYMBOL_GPL(skb_gro_receive); void __init skb_init(void) { skbuff_head_cache = 
kmem_cache_create("skbuff_head_cache", sizeof(struct sk_buff), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", (2*sizeof(struct sk_buff)) + sizeof(atomic_t), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } /** * skb_to_sgvec - Fill a scatter-gather list from a socket buffer * @skb: Socket buffer containing the buffers to be mapped * @sg: The scatter-gather list to map into * @offset: The offset into the buffer's contents to start mapping * @len: Length of buffer space to be mapped * * Fill the specified scatter-gather list with mappings/pointers into a * region of the buffer space attached to a socket buffer. */ static int __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int elt = 0; if (copy > 0) { if (copy > len) copy = len; sg_set_buf(sg, skb->data + offset, copy); elt++; if ((len -= copy) == 0) return elt; offset += copy; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int end; WARN_ON(start > offset + len); end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; sg_set_page(&sg[elt], skb_frag_page(frag), copy, frag->page_offset+offset-start); elt++; if (!(len -= copy)) return elt; offset += copy; } start = end; } skb_walk_frags(skb, frag_iter) { int end; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { if (copy > len) copy = len; elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, copy); if ((len -= copy) == 0) return elt; offset += copy; } start = end; } BUG_ON(len); return elt; } int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { int nsg = __skb_to_sgvec(skb, sg, offset, len); sg_mark_end(&sg[nsg - 1]); return nsg; } EXPORT_SYMBOL_GPL(skb_to_sgvec); /** * skb_cow_data - Check that a socket 
buffer's data buffers are writable * @skb: The socket buffer to check. * @tailbits: Amount of trailing space to be added * @trailer: Returned pointer to the skb where the @tailbits space begins * * Make sure that the data buffers attached to a socket buffer are * writable. If they are not, private copies are made of the data buffers * and the socket buffer is set to use these instead. * * If @tailbits is given, make sure that there is space to write @tailbits * bytes of data beyond current end of socket buffer. @trailer will be * set to point to the skb in which this space begins. * * The number of scatterlist elements required to completely map the * COW'd and extended socket buffer will be returned. */ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) { int copyflag; int elt; struct sk_buff *skb1, **skb_p; /* If skb is cloned or its head is paged, reallocate * head pulling out all the pages (pages are considered not writable * at the moment even if they are anonymous). */ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) return -ENOMEM; /* Easy case. Most of packets will go this way. */ if (!skb_has_frag_list(skb)) { /* A little of trouble, not enough of space for trailer. * This should not happen, when stack is tuned to generate * good frames. OK, on miss we reallocate and reserve even more * space, 128 bytes is fair. */ if (skb_tailroom(skb) < tailbits && pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) return -ENOMEM; /* Voila! */ *trailer = skb; return 1; } /* Misery. We are in troubles, going to mincer fragments... */ elt = 1; skb_p = &skb_shinfo(skb)->frag_list; copyflag = 0; while ((skb1 = *skb_p) != NULL) { int ntail = 0; /* The fragment is partially pulled by someone, * this can happen on input. Copy it and everything * after it. */ if (skb_shared(skb1)) copyflag = 1; /* If the skb is the last, worry about trailer. 
*/ if (skb1->next == NULL && tailbits) { if (skb_shinfo(skb1)->nr_frags || skb_has_frag_list(skb1) || skb_tailroom(skb1) < tailbits) ntail = tailbits + 128; } if (copyflag || skb_cloned(skb1) || ntail || skb_shinfo(skb1)->nr_frags || skb_has_frag_list(skb1)) { struct sk_buff *skb2; /* Fuck, we are miserable poor guys... */ if (ntail == 0) skb2 = skb_copy(skb1, GFP_ATOMIC); else skb2 = skb_copy_expand(skb1, skb_headroom(skb1), ntail, GFP_ATOMIC); if (unlikely(skb2 == NULL)) return -ENOMEM; if (skb1->sk) skb_set_owner_w(skb2, skb1->sk); /* Looking around. Are we still alive? * OK, link new skb, drop old one */ skb2->next = skb1->next; *skb_p = skb2; kfree_skb(skb1); skb1 = skb2; } elt++; *trailer = skb1; skb_p = &skb1->next; } return elt; } EXPORT_SYMBOL_GPL(skb_cow_data); static void sock_rmem_free(struct sk_buff *skb) { struct sock *sk = skb->sk; atomic_sub(skb->truesize, &sk->sk_rmem_alloc); } /* * Note: We dont mem charge error packets (no sk_forward_alloc changes) */ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned int)sk->sk_rcvbuf) return -ENOMEM; skb_orphan(skb); skb->sk = sk; skb->destructor = sock_rmem_free; atomic_add(skb->truesize, &sk->sk_rmem_alloc); /* before exiting rcu section, make sure dst is refcounted */ skb_dst_force(skb); skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, len); return 0; } EXPORT_SYMBOL(sock_queue_err_skb); void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps) { struct sock *sk = orig_skb->sk; struct sock_exterr_skb *serr; struct sk_buff *skb; int err; if (!sk) return; if (hwtstamps) { *skb_hwtstamps(orig_skb) = *hwtstamps; } else { /* * no hardware time stamps available, * so keep the shared tx_flags and only * store software time stamp */ orig_skb->tstamp = ktime_get_real(); } skb = skb_clone(orig_skb, GFP_ATOMIC); if (!skb) return; serr = 
SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); } EXPORT_SYMBOL_GPL(skb_tstamp_tx); void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) { struct sock *sk = skb->sk; struct sock_exterr_skb *serr; int err; skb->wifi_acked_valid = 1; skb->wifi_acked = acked; serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); /** * skb_partial_csum_set - set up and verify partial csum values for packet * @skb: the skb to set * @start: the number of bytes after skb->data to start checksumming. * @off: the offset from start to place the checksum. * * For untrusted partially-checksummed packets, we need to make sure the values * for skb->csum_start and skb->csum_offset are valid so we don't oops. * * This function checks and sets those values and skb->ip_summed: if this * returns false you should drop the packet. */ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) { if (unlikely(start > skb_headlen(skb)) || unlikely((int)start + off > skb_headlen(skb) - 2)) { net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", start, off, skb_headlen(skb)); return false; } skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_headroom(skb) + start; skb->csum_offset = off; skb_set_transport_header(skb, start); return true; } EXPORT_SYMBOL_GPL(skb_partial_csum_set); static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, unsigned int max) { if (skb_headlen(skb) >= len) return 0; /* If we need to pullup then pullup to the max, so we * won't need to do it again. 
*/ if (max > skb->len) max = skb->len; if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) return -ENOMEM; if (skb_headlen(skb) < len) return -EPROTO; return 0; } /* This value should be large enough to cover a tagged ethernet header plus * maximally sized IP and TCP or UDP headers. */ #define MAX_IP_HDR_LEN 128 static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate) { unsigned int off; bool fragment; int err; fragment = false; err = skb_maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN); if (err < 0) goto out; if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) fragment = true; off = ip_hdrlen(skb); err = -EPROTO; if (fragment) goto out; switch (ip_hdr(skb)->protocol) { case IPPROTO_TCP: err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), MAX_IP_HDR_LEN); if (err < 0) goto out; if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) { err = -EPROTO; goto out; } if (recalculate) tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - off, IPPROTO_TCP, 0); break; case IPPROTO_UDP: err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), MAX_IP_HDR_LEN); if (err < 0) goto out; if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) { err = -EPROTO; goto out; } if (recalculate) udp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - off, IPPROTO_UDP, 0); break; default: goto out; } err = 0; out: return err; } /* This value should be large enough to cover a tagged ethernet header plus * an IPv6 header, all options, and a maximal TCP or UDP header. 
*/ #define MAX_IPV6_HDR_LEN 256 #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) { int err; u8 nexthdr; unsigned int off; unsigned int len; bool fragment; bool done; fragment = false; done = false; off = sizeof(struct ipv6hdr); err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); if (err < 0) goto out; nexthdr = ipv6_hdr(skb)->nexthdr; len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); while (off <= len && !done) { switch (nexthdr) { case IPPROTO_DSTOPTS: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: { struct ipv6_opt_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct ipv6_opt_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_optlen(hp); break; } case IPPROTO_AH: { struct ip_auth_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct ip_auth_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct ip_auth_hdr, skb, off); nexthdr = hp->nexthdr; off += ipv6_authlen(hp); break; } case IPPROTO_FRAGMENT: { struct frag_hdr *hp; err = skb_maybe_pull_tail(skb, off + sizeof(struct frag_hdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; hp = OPT_HDR(struct frag_hdr, skb, off); if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) fragment = true; nexthdr = hp->nexthdr; off += sizeof(struct frag_hdr); break; } default: done = true; break; } } err = -EPROTO; if (!done || fragment) goto out; switch (nexthdr) { case IPPROTO_TCP: err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), MAX_IPV6_HDR_LEN); if (err < 0) goto out; if (!skb_partial_csum_set(skb, off, offsetof(struct tcphdr, check))) { err = -EPROTO; goto out; } if (recalculate) tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - off, IPPROTO_TCP, 0); break; case IPPROTO_UDP: err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), MAX_IPV6_HDR_LEN); if (err < 0) 
goto out; if (!skb_partial_csum_set(skb, off, offsetof(struct udphdr, check))) { err = -EPROTO; goto out; } if (recalculate) udp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - off, IPPROTO_UDP, 0); break; default: goto out; } err = 0; out: return err; } /** * skb_checksum_setup - set up partial checksum offset * @skb: the skb to set up * @recalculate: if true the pseudo-header checksum will be recalculated */ int skb_checksum_setup(struct sk_buff *skb, bool recalculate) { int err; switch (skb->protocol) { case htons(ETH_P_IP): err = skb_checksum_setup_ip(skb, recalculate); break; case htons(ETH_P_IPV6): err = skb_checksum_setup_ipv6(skb, recalculate); break; default: err = -EPROTO; break; } return err; } EXPORT_SYMBOL(skb_checksum_setup); void __skb_warn_lro_forwarding(const struct sk_buff *skb) { net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", skb->dev->name); } EXPORT_SYMBOL(__skb_warn_lro_forwarding); void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) { if (head_stolen) { skb_release_head_state(skb); kmem_cache_free(skbuff_head_cache, skb); } else { __kfree_skb(skb); } } EXPORT_SYMBOL(kfree_skb_partial); /** * skb_try_coalesce - try to merge skb to prior one * @to: prior buffer * @from: buffer to add * @fragstolen: pointer to boolean * @delta_truesize: how much more was allocated than was requested */ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) { int i, delta, len = from->len; *fragstolen = false; if (skb_cloned(to)) return false; if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; if (skb_headlen(from) != 0) { struct page *page; unsigned int offset; if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) return false; if (skb_head_is_locked(from)) return 
false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; } else { if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) return false; delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); } WARN_ON_ONCE(delta < len); memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0; /* if the skb is not cloned this does nothing * since we set nr_frags to 0. */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i); to->truesize += delta; to->len += len; to->data_len += len; *delta_truesize = delta; return true; } EXPORT_SYMBOL(skb_try_coalesce); /** * skb_scrub_packet - scrub an skb * * @skb: buffer to clean * @xnet: packet is crossing netns * * skb_scrub_packet can be used after encapsulating or decapsulting a packet * into/from a tunnel. Some information have to be cleared during these * operations. * skb_scrub_packet can also be used to clean a skb before injecting it in * another namespace (@xnet == true). We have to clear all information in the * skb that could impact namespace isolation. */ void skb_scrub_packet(struct sk_buff *skb, bool xnet) { if (xnet) skb_orphan(skb); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; skb->local_df = 0; skb_dst_drop(skb); skb->mark = 0; secpath_reset(skb); nf_reset(skb); nf_reset_trace(skb); } EXPORT_SYMBOL_GPL(skb_scrub_packet); /** * skb_gso_transport_seglen - Return length of individual segments of a gso packet * * @skb: GSO skb * * skb_gso_transport_seglen is used to determine the real size of the * individual segments, including Layer4 headers (TCP/UDP). 
* * The MAC/L2 or network (IP, IPv6) headers are not accounted for. */ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); unsigned int hdr_len; if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) hdr_len = tcp_hdrlen(skb); else hdr_len = sizeof(struct udphdr); return hdr_len + shinfo->gso_size; } EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
./CrossVul/dataset_final_sorted/CWE-416/c/bad_2030_0
crossvul-cpp_data_bad_820_5
// SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ bool ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. 
*/ bool __read_mostly tracing_selftest_disabled; /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ static DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. 
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. 
*/ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static int __init set_cmdline_ftrace(char *str) { strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = true; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { allocate_snapshot = true; /* We also need the main ring buffer expanded */ ring_buffer_expanded = true; return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 0; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 0; } __setup("trace_clock=", set_trace_boot_clock); static int __init 
set_tracepoint_printk(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; } __setup("tp_printk", set_tracepoint_printk); unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | \ TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; int ret = -ENODEV; mutex_lock(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; ret = 0; break; } } mutex_unlock(&trace_types_lock); return ret; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } void trace_array_put(struct trace_array *this_tr) { mutex_lock(&trace_types_lock); __trace_array_put(this_tr); mutex_unlock(&trace_types_lock); } int call_filter_check_discard(struct trace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && !filter_match_preds(call->filter, rec)) { __trace_event_discard_commit(buffer, event); return 1; } return 0; } void trace_free_pid_list(struct trace_pid_list *pid_list) { vfree(pid_list->pids); 
kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
*/ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* Sorry, but we don't support pid_max changing after setting */ if (task->pid >= pid_list->pid_max) return; /* "self" is set for forks, and NULL for exits */ if (self) set_bit(task->pid, pid_list->pids); else clear_bit(task->pid, pid_list->pids); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { unsigned long pid = (unsigned long)v; (*pos)++; /* pid already is +1 of the actual prevous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ if (pid < pid_list->pid_max) return (void *)(pid + 1); return NULL; } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. 
*/ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; loff_t l = 0; pid = find_first_bit(pid_list->pids, pid_list->pid_max); if (pid >= pid_list->pid_max) return NULL; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret = 0; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. 
*/ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); if (!pid_list) return -ENOMEM; pid_list->pid_max = READ_ONCE(pid_max); /* Only truncating will shrink pid_max */ if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) pid_list->pid_max = filtered_pids->pid_max; pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); if (!pid_list->pids) { kfree(pid_list); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ for_each_set_bit(pid, filtered_pids->pids, filtered_pids->pid_max) { set_bit(pid, pid_list->pids); nr_pids++; } } while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0 || !trace_parser_loaded(&parser)) break; read += ret; ubuf += ret; cnt -= ret; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; if (val >= pid_list->pid_max) break; pid = (pid_t)val; set_bit(pid, pid_list->pids); nr_pids++; trace_parser_clear(&parser); ret = 0; } trace_parser_put(&parser); if (ret < 0) { trace_free_pid_list(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_free_pid_list(pid_list); read = ret; pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer, cpu); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.trace_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been disabled * * Shows if the global trace has been enabled or not. It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. 
*/ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewrited * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. 
*/ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned long flags, int pc) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, flags, pc); ent->type 
= type; } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, flags, pc); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->trace_buffer.buffer) ring_buffer_record_on(tr->trace_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. */ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); } else ring_buffer_unlock_commit(buffer, event); } /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. 
*/ int __trace_puts(unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; int alloc; int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, str, size); /* Add a newline if necessary */ if (entry->buf[size - 1] != '\n') { entry->buf[size] = '\n'; entry->buf[size + 1] = '\0'; } else entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return size; } EXPORT_SYMBOL_GPL(__trace_puts); /** * __trace_bputs - write the pointer to a constant string into trace buffer * @ip: The address of the caller * @str: The constant string to write to the buffer to */ int __trace_bputs(unsigned long ip, const char *str) { struct ring_buffer_event *event; struct ring_buffer *buffer; struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); int pc; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, irq_flags, pc); if (!event) return 0; entry = ring_buffer_event_data(event); entry->ip = ip; entry->str = str; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); return 1; } EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT void tracing_snapshot_instance_cond(struct 
trace_array *tr, void *cond_data) { struct tracer *tracer = tr->current_trace; unsigned long flags; if (in_nmi()) { internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); internal_trace_puts("*** snapshot is being ignored ***\n"); return; } if (!tr->allocated_snapshot) { internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n"); internal_trace_puts("*** stopping trace here! ***\n"); tracing_off(); return; } /* Note, snapshot can not be used when the tracer uses it */ if (tracer->use_max_tr) { internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n"); internal_trace_puts("*** Can not use snapshot (sorry) ***\n"); return; } local_irq_save(flags); update_max_tr(tr, current, smp_processor_id(), cond_data); local_irq_restore(flags); } void tracing_snapshot_instance(struct trace_array *tr) { tracing_snapshot_instance_cond(tr, NULL); } /** * tracing_snapshot - take a snapshot of the current buffer. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. * * Note, make sure to allocate the snapshot with either * a tracing_snapshot_alloc(), or by doing it manually * with: echo 1 > /sys/kernel/debug/tracing/snapshot * * If the snapshot buffer is not allocated, it will stop tracing. * Basically making a permanent snapshot. */ void tracing_snapshot(void) { struct trace_array *tr = &global_trace; tracing_snapshot_instance(tr); } EXPORT_SYMBOL_GPL(tracing_snapshot); /** * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	/* Thin wrapper: the real work is in tracing_snapshot_instance_cond(). */
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_snapshot_cond_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
*/ void *tracing_cond_snapshot_data(struct trace_array *tr) { void *cond_data = NULL; arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) cond_data = tr->cond_snapshot->cond_data; arch_spin_unlock(&tr->max_lock); return cond_data; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, struct trace_buffer *size_buf, int cpu_id); static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); int tracing_alloc_snapshot_instance(struct trace_array *tr) { int ret; if (!tr->allocated_snapshot) { /* allocate spare buffer */ ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; tr->allocated_snapshot = true; } return 0; } static void free_snapshot(struct trace_array *tr) { /* * We don't free the ring buffer. instead, resize it because * The max_tr ring buffer has some state (e.g. ring->clock) and * we want preserve it. */ ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); set_buffer_entries(&tr->max_buffer, 1); tracing_reset_online_cpus(&tr->max_buffer); tr->allocated_snapshot = false; } /** * tracing_alloc_snapshot - allocate snapshot buffer. * * This only allocates the snapshot buffer if it isn't already * allocated - it doesn't also take a snapshot. * * This is meant to be used in cases where the snapshot buffer needs * to be set up for events that can't sleep but need to be able to * trigger a snapshot. */ int tracing_alloc_snapshot(void) { struct trace_array *tr = &global_trace; int ret; ret = tracing_alloc_snapshot_instance(tr); WARN_ON(ret < 0); return ret; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); /** * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. * * This is similar to tracing_snapshot(), but it will allocate the * snapshot buffer if it isn't already allocated. Use this only * where it is safe to sleep, as the allocation may sleep. 
* * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. */ void tracing_snapshot_alloc(void) { int ret; ret = tracing_alloc_snapshot(); if (ret < 0) return; tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); /** * tracing_snapshot_cond_enable - enable conditional snapshot for an instance * @tr: The tracing instance * @cond_data: User data to associate with the snapshot * @update: Implementation of the cond_snapshot update function * * Check whether the conditional snapshot for the given instance has * already been enabled, or if the current tracer is already using a * snapshot; if so, return -EBUSY, else create a cond_snapshot and * save the cond_data and update function inside. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { struct cond_snapshot *cond_snapshot; int ret = 0; cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); if (!cond_snapshot) return -ENOMEM; cond_snapshot->cond_data = cond_data; cond_snapshot->update = update; mutex_lock(&trace_types_lock); ret = tracing_alloc_snapshot_instance(tr); if (ret) goto fail_unlock; if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto fail_unlock; } /* * The cond_snapshot can only change to NULL without the * trace_types_lock. We don't care if we race with it going * to NULL, but we want to make sure that it's not set to * something other than NULL when we get here, which we can * do safely with only holding the trace_types_lock and not * having to take the max_lock. 
*/ if (tr->cond_snapshot) { ret = -EBUSY; goto fail_unlock; } arch_spin_lock(&tr->max_lock); tr->cond_snapshot = cond_snapshot; arch_spin_unlock(&tr->max_lock); mutex_unlock(&trace_types_lock); return ret; fail_unlock: mutex_unlock(&trace_types_lock); kfree(cond_snapshot); return ret; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); /** * tracing_snapshot_cond_disable - disable conditional snapshot for an instance * @tr: The tracing instance * * Check whether the conditional snapshot for the given instance is * enabled; if so, free the cond_snapshot associated with it, * otherwise return -EINVAL. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_disable(struct trace_array *tr) { int ret = 0; arch_spin_lock(&tr->max_lock); if (!tr->cond_snapshot) ret = -EINVAL; else { kfree(tr->cond_snapshot); tr->cond_snapshot = NULL; } arch_spin_unlock(&tr->max_lock); return ret; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); #else void tracing_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot); void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); int tracing_alloc_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); void tracing_snapshot_alloc(void) { /* Give warning */ tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); void *tracing_cond_snapshot_data(struct trace_array *tr) { return NULL; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); int tracing_snapshot_cond_disable(struct trace_array *tr) { return false; } 
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

/* Per-instance worker for tracing_off(): stop the instance's ring buffer
 * from recording and mirror the state in tr->buffer_disabled.
 */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

/* Turn tracing off if the "traceoff_on_warning" boot option was set. */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
*/
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	/* No buffer allocated yet: fall back to the software flag */
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

/* Parse the "trace_buf_size=" boot parameter (per-cpu buffer size in bytes). */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

/* Parse the "tracing_thresh=" boot parameter (latency threshold in usecs). */
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	/* stored internally in nanoseconds */
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

/* Table of selectable trace clocks (clock function, name, ns flag). */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds?
*/
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

/* Return true if the instance's currently selected clock counts in ns. */
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;	/* NOTE: returns 1 on allocation failure, not -ENOMEM */

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A fresh read (offset 0) starts a new token */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
*/
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			/* token does not fit in the parser buffer */
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	/* Copy at most the unread remainder of the seq buffer */
	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure.
(this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
/* Called with tr->max_lock held (see update_max_tr() / update_max_tr_single()). */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
*/
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	/* A conditional snapshot may veto the swap */
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
*/
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/* Wait for the per-cpu trace pipe to have data; @full waits for a full page. */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

/* Queue a tracer's selftest to be run later in the boot process. */
static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

/* Run @type's selftest against global_trace; non-zero means it failed. */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
*/
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

/* Run selftests that were postponed because they registered too early in boot. */
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			/* Unlink the failing tracer from the trace_types list */
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer
*t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate registrations by name */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it.
*/
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

/* Reset a single cpu's ring buffer after quiescing writers. */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Reset every online cpu's ring buffer for @buf. */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

/* pid -> tgid cache, allocated lazily when tgid recording is enabled */
static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer
*s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	/* NO_CMDLINE_MAP is all 0xff, so byte-wise memset works here */
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

/* Allocate the global saved-cmdlines cache at its default size. */
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

/* Per-instance variant of tracing_start(). */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();
raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

/* Per-instance variant of tracing_stop(). */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/* Cache @tsk's comm in the saved-cmdlines table; returns 0 if it was skipped. */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
*/
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next one round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

/* Look up a cached comm for @pid; caller must hold trace_cmdline_lock. */
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

/* Return the cached tgid for @pid, or 0 if unknown/out of range. */
int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}

/* Cache @tsk's tgid; returns 0 if the map is absent or pid out of range. */
static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}

/* Return true when task-info recording should be skipped entirely. */
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo -
record the task info of a task
 *
 * @task  - task to record
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *        - TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev - previous task during sched_switch
 * @next - next task during sched_switch
 * @flags - TRACE_RECORD_CMDLINE for recording comm
 *          TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}

/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise.
This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

/* Fill the common fields of a trace entry from current context. */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	/* Encode irq/softirq/NMI and resched state into the flags byte */
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
*/
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	/* Refcounted: only the first enabler allocates */
	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		/* Sanity check that this_cpu access matches per_cpu storage */
		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	/* Only the last disabler tears the buffers down */
	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
*/
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		/* Buffer already in use (nested event): fall back to ring buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
*/
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

/* Print a trace event to the console via the tracepoint_print_iter. */
static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	/* The single shared iterator is protected by tracepoint_iter_lock */
	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

/* sysctl handler flipping the tracepoint_printk static key. */
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event,
fbuffer->entry, fbuffer->flags,
				    fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

/* Hand one event's raw data to an exporter's write() callback. */
static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

/* Pass an event to every registered exporter (RCU-walked list). */
static void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export
**list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	/* Not found in the list */
	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	/* First exporter flips the static branch on */
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	/* Last exporter flips the static branch off */
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

/* Record a function-entry (TRACE_FN) event into @tr's ring buffer. */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if
(!call_filter_check_discard(call, entry, buffer, event)) { if (static_branch_unlikely(&ftrace_exports_enabled)) ftrace_exports(event); __buffer_unlock_commit(buffer, event); } } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) trace.skip++; #endif /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. 
*/ barrier(); if (use_stack == 1) { trace.entries = this_cpu_ptr(ftrace_stack.calls); trace.max_entries = FTRACE_STACK_MAX_ENTRIES; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); if (trace.nr_entries > size) size = trace.nr_entries; } else /* From now on, use_stack is a boolean */ use_stack = 0; size *= sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); memset(&entry->caller, 0, size); if (use_stack) memcpy(&entry->caller, trace.entries, trace.nr_entries * sizeof(unsigned long)); else { trace.max_entries = FTRACE_STACK_ENTRIES; trace.entries = entry->caller; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); } entry->size = trace.nr_entries; if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); preempt_enable_notrace(); } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(buffer, flags, skip, pc, regs); } void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { struct ring_buffer *buffer = tr->trace_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(buffer, flags, skip, pc, NULL); return; } /* * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), * but if the above rcu_is_watching() failed, then the NMI * triggered someplace critical, and rcu_irq_enter() should * not be called from NMI. 
*/ if (unlikely(in_nmi())) return; rcu_irq_enter_irqson(); __ftrace_trace_stack(buffer, flags, skip, pc, NULL); rcu_irq_exit_irqson(); } /** * trace_dump_stack - record a stack back trace in the trace buffer * @skip: Number of functions to skip (helper handlers) */ void trace_dump_stack(int skip) { unsigned long flags; if (tracing_disabled || tracing_selftest_running) return; local_save_flags(flags); #ifndef CONFIG_UNWINDER_ORC /* Skip 1 to skip this function. */ skip++; #endif __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, skip, preempt_count(), NULL); } EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; struct stack_trace trace; if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* * NMIs can not handle page faults, even with fix ups. * The save user stack can (and often does) fault. */ if (unlikely(in_nmi())) return; /* * prevent recursion, since the user stack tracing may * trigger other kernel events. 
*/ preempt_disable(); if (__this_cpu_read(user_stack_count)) goto out; __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), flags, pc); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = 0; trace.entries = entry->caller; save_stack_trace_user(&trace); if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); out: preempt_enable(); } #ifdef UNUSED static void __trace_userstack(struct trace_array *tr, unsigned long flags) { ftrace_trace_userstack(tr, flags, preempt_count()); } #endif /* UNUSED */ #endif /* CONFIG_STACKTRACE */ /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; char buffer[4][TRACE_BUF_SIZE]; }; static struct trace_buffer_struct *trace_percpu_buffer; /* * Thise allows for lockless recording. If we're nested too deeply, then * this returns NULL. 
*/ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); if (!buffer || buffer->nesting >= 4) return NULL; buffer->nesting++; /* Interrupts must see nesting incremented before we use the buffer */ barrier(); return &buffer->buffer[buffer->nesting][0]; } static void put_trace_buf(void) { /* Don't let the decrement of nesting leak before this */ barrier(); this_cpu_dec(trace_percpu_buffer->nesting); } static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct *buffers; buffers = alloc_percpu(struct trace_buffer_struct); if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; return 0; } static int buffers_allocated; void trace_printk_init_buffers(void) { if (buffers_allocated) return; if (alloc_percpu_trace_buffer()) return; /* trace_printk() is for debug use only. Don't use it in production. */ pr_warn("\n"); pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); pr_warn("** **\n"); pr_warn("** This means that this is a DEBUG kernel and it is **\n"); pr_warn("** unsafe for production use. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your vendor! **\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ tracing_update_buffers(); buffers_allocated = 1; /* * trace_printk_init_buffers() can be called by modules. * If that happens, then we need to start cmdline recording * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. 
*/ if (global_trace.trace_buffer.buffer) tracing_start_cmdline_record(); } void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; char *tbuffer; int len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; int 
len = 0, size, pc; struct print_entry *entry; unsigned long flags; char *tbuffer; if (tracing_disabled || tracing_selftest_running) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } __printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_read(buf_iter, NULL); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, lost_events); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->trace_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. 
*/ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, NULL, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { if (ts >= iter->trace_buffer->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; /* * copy the tracer to avoid using a global lock all around. * iter->trace is a copy of current_trace, the pointer to the * name may be used instead of a strcmp(), as iter->trace->name * will point to the same string as current_trace->name. 
*/ mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) *iter->trace = *tr->current_trace; mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (!iter->snapshot) atomic_inc(&trace_record_taskinfo_disabled); if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. */ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif if (!iter->snapshot) atomic_dec(&trace_record_taskinfo_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries(struct trace_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long count; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. 
*/ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total += count; } else *total += count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries += count; } } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off \n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / delay \n" "# cmd pid ||||| time | caller \n" "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; const char tgid_space[] = " "; const char space[] = " "; print_event_info(buf, m); seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? tgid_space : space); seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); seq_printf(m, "# | | %s | |||| | |\n", tgid ? 
" | " : space); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct trace_buffer *buf = iter->trace_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = "preemption"; name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, #if defined(CONFIG_PREEMPT_NONE) "server", #elif defined(CONFIG_PREEMPT_VOLUNTARY) "desktop", #elif defined(CONFIG_PREEMPT) "preempt", #else "unknown", #endif /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, 
iter->started)) return; if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { if (iter->iter_flags & TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) return event->funcs->trace(iter, sym_flags, event); trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); 
SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
*/ enum print_line_t print_trace_line(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; enum print_line_t ret; if (iter->lost_events) { trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", iter->cpu, iter->lost_events); if (trace_seq_has_overflowed(&iter->seq)) return TRACE_TYPE_PARTIAL_LINE; } if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (iter->ent->type == TRACE_BPUTS && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bputs_msg_only(iter); if (iter->ent->type == TRACE_BPRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bprintk_msg_only(iter); if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_printk_msg_only(iter); if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER_HEX) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER_RAW) return print_raw_fmt(iter); return print_trace_fmt(iter); } void trace_latency_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) print_trace_header(m, iter); if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long trace_flags = tr->trace_flags; if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { if (!(trace_flags & 
TRACE_ITER_VERBOSE)) { if (trace_flags & TRACE_ITER_IRQ_INFO) print_func_help_header_irq(iter->trace_buffer, m, trace_flags); else print_func_help_header(iter->trace_buffer, m, trace_flags); } } } static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" "# MAY BE MISSING FUNCTION EVENTS\n"); } #ifdef CONFIG_TRACER_MAX_TRACE static void show_snapshot_main_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer.\n" "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void show_snapshot_percpu_help(struct seq_file *m) { seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" "# Takes a snapshot of the main buffer for this cpu.\n"); #else seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" "# Must use main snapshot file to allocate.\n"); #endif seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" "# (Doesn't have to be '2' works with any number that\n" "# is not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if (iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct 
seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { print_trace_line(iter); ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. 
*/ mutex_lock(&trace_types_lock); iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) goto fail; *iter->trace = *tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->trace_buffer = &tr->max_buffer; else #endif iter->trace_buffer = &tr->trace_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* stop the trace while dumping if we are not opening "snapshot" */ if (!iter->snapshot) tracing_stop_tr(tr); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu, GFP_KERNEL); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu, GFP_KERNEL); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); kfree(iter->buffer_iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; filp->private_data = inode->i_private; return 
0; } bool tracing_is_disabled(void) { return (tracing_disabled) ? true: false; } /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. */ static int tracing_open_generic_tr(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; filp->private_data = inode->i_private; return 0; } static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) { trace_array_put(tr); return 0; } /* Writes do not use seq_file */ iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (!iter->snapshot) /* reenable tracing if it was previously enabled */ tracing_start_tr(tr); __trace_array_put(tr); mutex_unlock(&trace_types_lock); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); kfree(iter->trace); kfree(iter->buffer_iter); seq_release_private(inode, file); return 0; } static int tracing_release_generic_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return 0; } static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return single_release(inode, file); } static int tracing_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); struct trace_buffer *trace_buf = &tr->trace_buffer; 
#ifdef CONFIG_TRACER_MAX_TRACE
		/* Latency tracers expose the snapshot buffer through "trace" */
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif
		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

/* seq_file iterator over the registered tracers usable by this array */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	/* Held until t_stop(); protects the trace_types list */
	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

/* Print tracer names space-separated, newline after the last one */
static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

/* Open for "available_tracers" */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return
-ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

/* Accept and discard writes (used for the writable "trace" file) */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

/* lseek: delegate to seq_lseek for readers, reset f_pos for write-only */
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.write = tracing_write_stub,
	.llseek = tracing_lseek,
	.release = tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
	.llseek = seq_lseek,
};

/*
 * Read for "tracing_cpumask": formats the mask via %*pb.  The first
 * snprintf(NULL, 0, ...) sizes the buffer before allocating it.
 */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		/* User buffer too small for the whole mask */
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

/*
 * Write for "tracing_cpumask": parse the new mask and enable/disable
 * per-cpu recording for every bit that flips, under irq-off + max_lock.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_cpumask_read,
	.write = tracing_cpumask_write,
	.release = tracing_release_generic_tr,
	.llseek = generic_file_llseek,
};

/*
 * show for "trace_options": lists global flags first, then the
 * current tracer's private options, each prefixed "no" when clear.
 */
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

/* Flip one tracer-private option bit, letting the tracer veto via set_flag */
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

/*
 * Set or clear one global trace flag and apply its side effects
 * (cmdline/tgid recording, fork following, overwrite mode, printk).
 * Caller holds trace_types_lock (called from trace_set_options()).
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		/* Lazily allocate the pid -> tgid map on first enable */
		if (!tgid_map)
			tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
					   sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

/*
 * Parse one option token ("opt" or "noopt") and apply it as either a
 * global flag or a tracer-private option.
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could
be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the 
buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t-change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t-view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by adding a suffix 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t 
stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t -:[<group>/]<event>\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" #else "\t $stack<index>, $stack, $retval, $comm\n" #endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>,\n" "\t <type>\\[<array-size>\\]\n" #ifdef 
CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo 
'!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. 
The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. The syntax is analogous to\n" "\t the enable_event and disable_event triggers.\n\n" "\t Hist trigger handlers and actions are executed whenever a\n" "\t a histogram entry is added or updated. They take the form:\n\n" "\t <handler>.<action>\n\n" "\t The available handlers are:\n\n" "\t onmatch(matching.event) - invoke on addition or update\n" "\t onmax(var) - invoke if var exceeds current max\n" "\t onchange(var) - invoke action if var changes\n\n" "\t The available actions are:\n\n" "\t trace(<synthetic_event>,param list) - generate synthetic event\n" "\t save(field,...) 
- save current event fields\n" #ifdef CONFIG_TRACER_SNAPSHOT "\t snapshot() - snapshot the trace buffer\n" #endif #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) { int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { if (trace_find_tgid(*ptr)) return ptr; } return NULL; } static void *saved_tgids_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 0; if (!tgid_map) return NULL; v = &tgid_map[0]; while (l <= *pos) { v = saved_tgids_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_tgids_stop(struct seq_file *m, void *v) { } static int saved_tgids_show(struct seq_file *m, void *v) { int pid = (int *)v - tgid_map; seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); return 0; } static const struct seq_operations tracing_saved_tgids_seq_ops = { .start = saved_tgids_start, .stop = saved_tgids_stop, .next = saved_tgids_next, .show = saved_tgids_show, }; static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_tgids_seq_ops); } static const struct file_operations tracing_saved_tgids_fops = { .open = tracing_saved_tgids_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) { unsigned int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; ptr++) { if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) continue; return ptr; } return NULL; } static void *saved_cmdlines_start(struct seq_file *m, 
				  loff_t *pos)
{
	void *v;
	loff_t l = 0;

	/* Held (with preemption off) until saved_cmdlines_stop() */
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

/* Print one "pid comm" pair */
static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start = saved_cmdlines_start,
	.next = saved_cmdlines_next,
	.stop = saved_cmdlines_stop,
	.show = saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open = tracing_saved_cmdlines_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Read for "saved_cmdlines_size": current number of cmdline slots */
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/*
 * Free a saved_cmdlines buffer.
 * NOTE(review): frees saved_cmdlines and map_cmdline_to_pid separately;
 * assumes allocate_cmdlines_buffer() (not visible here) allocates them
 * as separate kmalloc objects — confirm against that function.
 */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

/*
 * Replace the cmdline buffer with one of @val entries; the old buffer
 * is swapped out under trace_cmdline_lock and freed afterwards.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open = tracing_open_generic,
	.read = tracing_saved_cmdlines_size_read,
	.write = tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Skip over list-link items: a cleared eval_string marks a tail entry */
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	/* Held until eval_map_stop(); protects the trace_eval_maps list */
	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

/* Print one "NAME VALUE (system)" mapping */
static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start = eval_map_start,
	.next = eval_map_next,
	.stop = eval_map_stop,
	.show = eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open = tracing_eval_map_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

/*
 * Append @len eval maps from @mod to the global trace_eval_maps list,
 * copying them into a freshly allocated head+maps+tail array.
 */
static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		/* Walk to the last tail item and link the new array there */
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	/* Zeroed tail item terminates this array's list entry */
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

/* Read for "current_tracer": the active tracer's name */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* Reset the buffer and run the tracer's init hook */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

/* Record the per-cpu entry count after a resize */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the
 * size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/*
 * Resize the trace buffer (and, for latency tracers on the top-level
 * array, the max/snapshot buffer) to @size entries on @cpu or all cpus.
 * Caller holds trace_types_lock.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* Max buffer resize failed: try to restore the main buffer */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

/* Validate @cpu_id and resize under trace_types_lock */
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
*/ static void tracing_set_nop(struct trace_array *tr) { if (tr->current_trace == &nop_trace) return; tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); tr->current_trace = &nop_trace; } static void add_tracer_options(struct trace_array *tr, struct tracer *t) { /* Only enable if the directory has been created already. */ if (!tr->dir) return; create_trace_option_files(tr, t); } static int tracing_set_tracer(struct trace_array *tr, const char *buf) { struct tracer *t; #ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; #endif int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) goto out; ret = 0; } for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; } if (!t) { ret = -EINVAL; goto out; } if (t == tr->current_trace) goto out; #ifdef CONFIG_TRACER_SNAPSHOT if (t->use_max_tr) { arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); if (ret) goto out; } #endif /* Some tracers won't work on kernel command line */ if (system_state < SYSTEM_RUNNING && t->noboot) { pr_warn("Tracer '%s' is not allowed on command line, ignored\n", t->name); goto out; } /* Some tracers are only allowed for the top level buffer */ if (!trace_ok_for_array(t, tr)) { ret = -EINVAL; goto out; } /* If trace pipe files are being read, we can't change the tracer */ if (tr->current_trace->ref) { ret = -EBUSY; goto out; } trace_branch_disable(); tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); /* Current trace needs to be nop_trace before synchronize_rcu */ tr->current_trace = &nop_trace; #ifdef CONFIG_TRACER_MAX_TRACE had_max_tr = tr->allocated_snapshot; if (had_max_tr && !t->use_max_tr) { /* * We need to make sure that the update_max_tr sees that * current_trace changed to nop_trace to keep it from * swapping the buffers after we resize it. 
* The update_max_tr is called from interrupts disabled * so a synchronized_sched() is sufficient. */ synchronize_rcu(); free_snapshot(tr); } #endif #ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr && !had_max_tr) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; } #endif if (t->init) { ret = tracer_init(t, tr); if (ret) goto out; } tr->current_trace = t; tr->current_trace->enabled++; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); return ret; } static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+1]; int i; size_t ret; int err; ret = cnt; if (cnt > MAX_TRACER_SIZE) cnt = MAX_TRACER_SIZE; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; /* strip ending whitespace. */ for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) buf[i] = 0; err = tracing_set_tracer(tr, buf); if (err) return err; *ppos += ret; return ret; } static ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; mutex_lock(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) goto out; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) goto out; } ret = cnt; out: mutex_unlock(&trace_types_lock); return ret; } #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); } #endif static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret = 0; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; mutex_lock(&trace_types_lock); /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; __trace_array_put(tr); goto out; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe 
does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->trace_buffer = &tr->trace_buffer; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->current_trace->ref++; out: mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter); __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); tr->current_trace->ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); mutex_destroy(&iter->mutex); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER_BLOCK) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, filp, poll_table); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. 
*/ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; int ret; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { return -EAGAIN; } /* * We block until we read something and tracing is disabled. * We still block if tracing is disabled, but we have never * read anything. This allows a user to cat this file, and * then enable tracing. But after we have read something, * we give an EOF when tracing is again disabled. * * iter->pos will be 0 if we haven't read anything. */ if (!tracer_tracing_is_on(iter->tr) && iter->pos) break; mutex_unlock(&iter->mutex); ret = wait_on_pipe(iter, 0); mutex_lock(&iter->mutex); if (ret) return ret; } return 1; } /* * Consumer reader. */ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. 
*/ mutex_lock(&iter->mutex); /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) goto out; trace_seq_init(&iter->seq); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (trace_seq_used(&iter->seq) >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. */ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. 
*/ if (sret == -EBUSY) goto waitagain; out: mutex_unlock(&iter->mutex); return sret; } static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, unsigned int idx) { __free_page(spd->pages[idx]); } static const struct pipe_buf_operations tracing_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static size_t tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) { size_t count; int save_len; int ret; /* Seq buffer is page-sized, exactly what we need. */ for (;;) { save_len = iter->seq.seq.len; ret = print_trace_line(iter); if (trace_seq_has_overflowed(&iter->seq)) { iter->seq.seq.len = save_len; break; } /* * This should not be hit, because it should only * be set if the iter->seq overflowed. But check it * anyway to be safe. */ if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.seq.len = save_len; break; } count = trace_seq_used(&iter->seq) - save_len; if (rem < count) { rem = 0; iter->seq.seq.len = save_len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; } } return rem; } static ssize_t tracing_splice_read_pipe(struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct page *pages_def[PIPE_DEF_BUFFERS]; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct trace_iterator *iter = filp->private_data; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), trace_seq_used(&iter->seq)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = trace_seq_used(&iter->seq); trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } 
if (buf_size_same) { if (!ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? 
*/ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; const char faulted[] = "<faulted>"; ssize_t written; int size; int len; /* Used in tracing_mark_raw_write() as well */ #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ /* If less than "<faulted>", then make sure we can still add that */ if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (unlikely(!event)) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); if (len) { memcpy(&entry->buf, faulted, FAULTED_SIZE); cnt = FAULTED_SIZE; written = -EFAULT; } else written = cnt; len = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; __buffer_unlock_commit(buffer, event); if (tt) 
event_triggers_post_call(tr->trace_marker_file, tt); if (written > 0) *fpos += written; return written; } /* Limit it for now to 3K (including tag) */ #define RAW_DATA_MAX_SIZE (1024*3) static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct ring_buffer *buffer; struct raw_data_entry *entry; const char faulted[] = "<faulted>"; unsigned long irq_flags; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, faulted, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); if (written > 0) *fpos += written; return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? 
"]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; mutex_lock(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; mutex_lock(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); mutex_unlock(&trace_types_lock); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = 
inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) { int ret = 0; mutex_lock(&trace_types_lock); if (abs && tr->time_stamp_abs_ref++) goto out; if (!abs) { if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { ret = -EINVAL; goto out; } if (--tr->time_stamp_abs_ref) goto out; } ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); #endif out: mutex_unlock(&trace_types_lock); return ret; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->trace_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, 
cnt, 10, &val); if (ret) return ret; mutex_lock(&trace_types_lock); if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto out; } arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); if (ret) goto out; switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } #endif if (!tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id(), NULL); else update_max_tr_single(tr, current, iter->cpu_file); local_irq_enable(); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } out: mutex_unlock(&trace_types_lock); return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int 
ret; ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.trace_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; 
static const struct file_operations tracing_mark_raw_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_raw_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; #endif /* CONFIG_TRACER_SNAPSHOT */ static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.trace_buffer = &tr->trace_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->current_trace->ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct 
trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; } } if (!info->spare) return ret; /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->trace_buffer->buffer, &info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = PAGE_SIZE - info->read; if (size > count) size = count; ret = copy_to_user(ubuf, info->spare + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; mutex_lock(&trace_types_lock); iter->tr->current_trace->ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare_cpu, info->spare); kfree(info); mutex_unlock(&trace_types_lock); return 0; } struct buffer_ref { struct ring_buffer *buffer; void *page; int cpu; int ref; }; static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = 
(struct buffer_ref *)buf->private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); buf->private = 0; } static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; ref->ref++; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. */ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (*ppos & (PAGE_SIZE - 1)) return -EINVAL; if (len & (PAGE_SIZE - 1)) { if (len < PAGE_SIZE) return -EINVAL; len &= PAGE_MASK; } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len 
&& entries; i++, len -= PAGE_SIZE) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, &ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ref->page); spd.pages[i] = page; spd.partial[i].len = PAGE_SIZE; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += PAGE_SIZE; entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? */ if (!spd.nr_pages) { if (ret) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->tr->buffer_percent); if (ret) goto out; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct trace_buffer *trace_buf = &tr->trace_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt 
= ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer, cpu)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *p = filp->private_data; char buf[64]; /* Not too big for a shallow stack */ int r; r = scnprintf(buf, 63, "%ld", *p); buf[r++] = '\n'; return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* 
CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int 
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') return unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; ret = register_ftrace_function_probe(glob, tr, ops, count); out: return ret < 0 ? ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); WARN_ONCE(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * trace_create_cpu_file(const char *name, umode_t mode, struct dentry 
*parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", 0444, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_cpu_file("snapshot", 0644, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, tr, cpu, &snapshot_raw_fops); #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return 
-EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) return ret; } *ppos += cnt; return cnt; } static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, }; /* * In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. 
*/ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return tr->options; } static void 
create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, 0644, t_options, topt, &trace_options_fops); } static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; int i; if (!tracer) return; flags = tracer->flags; if (!flags || !flags->opts) return; /* * If this is an instance, only create flags for tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return; for (i = 0; i < tr->nr_topts; i++) { /* Make sure there's no duplicate flags. */ if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) return; } opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); WARN_ONCE(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, 0644, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; 
t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) create_trace_option_core_file(tr, trace_options[i], i); } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->trace_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { mutex_lock(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); } mutex_unlock(&trace_types_lock); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; if (!val) val = 1; tr->buffer_percent = val; (*ppos)++; return cnt; } 
static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; struct dentry *trace_instance_dir; static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) { enum ring_buffer_flags rb_flags; rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; buf->tr = tr; buf->buffer = ring_buffer_alloc(size, rb_flags); if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->trace_buffer, ring_buffer_size(tr->trace_buffer.buffer, 0)); return 0; } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; /* * Only the top level trace array gets its snapshot allocated * from the kernel command line. 
*/ allocate_snapshot = false; #endif return 0; } static void free_trace_buffer(struct trace_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static void __update_tracer_options(struct trace_array *tr) { struct tracer *t; for (t = trace_types; t; t = t->next) add_tracer_options(tr, t); } static void update_tracer_options(struct trace_array *tr) { mutex_lock(&trace_types_lock); __update_tracer_options(tr); mutex_unlock(&trace_types_lock); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -EEXIST; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) goto out_unlock; } ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) goto out_unlock; tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; tr->current_trace = &nop_trace; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; tr->dir = tracefs_create_dir(name, trace_instance_dir); if (!tr->dir) goto out_free_tr; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove_recursive(tr->dir); goto out_free_tr; } 
ftrace_init_trace_array(tr); init_tracer_tracefs(tr, tr->dir); init_trace_flags_index(tr); __update_tracer_options(tr); list_add(&tr->list, &ftrace_trace_arrays); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return 0; out_free_tr: free_trace_buffers(tr); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static int instance_rmdir(const char *name) { struct trace_array *tr; int found = 0; int ret; int i; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -ENODEV; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { found = 1; break; } } if (!found) goto out_unlock; ret = -EBUSY; if (tr->ref || (tr->current_trace && tr->current_trace->ref)) goto out_unlock; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1 << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1 << i, 0); } tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove_recursive(tr->dir); free_trace_buffers(tr); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); ret = 0; out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static __init void create_trace_instances(struct dentry *d_tracer) { trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (WARN_ON(!trace_instance_dir)) return; } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { struct trace_event_file *file; int cpu; trace_create_file("available_tracers", 0444, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", 0644, d_tracer, tr, &set_tracer_fops); 
trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", 0644, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", 0644, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", 0444, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", 0444, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) trace_create_file("tracing_max_latency", 0644, d_tracer, &tr->max_latency, &tracing_max_lat_fops); #endif if (ftrace_create_function_files(tr, d_tracer)) WARN(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the 
debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; mnt = vfs_submount(mntpt, type, "tracefs", NULL); put_filesystem(type); if (IS_ERR(mnt)) return NULL; mntget(mnt); return mnt; } /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ struct dentry *tracing_init_dentry(void) { struct trace_array *tr = &global_trace; /* The top level trace array uses NULL as parent */ if (tr->dir) return NULL; if (WARN_ON(!tracefs_initialized()) || (IS_ENABLED(CONFIG_DEBUG_FS) && WARN_ON(!debugfs_initialized()))) return ERR_PTR(-ENODEV); /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kerenl. */ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); if (!tr->dir) { pr_warn_once("Could not create debugfs directory 'tracing'\n"); return ERR_PTR(-ENOMEM); } return NULL; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static void __init trace_eval_init(void) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); } #ifdef CONFIG_MODULES static void trace_module_add_evals(struct module *mod) { if (!mod->num_trace_evals) return; /* * Modules with bad taint do not have events created, do * not bother with enums either. 
*/ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; mutex_lock(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) goto out; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); out: mutex_unlock(&trace_eval_mutex); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return 0; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init int tracer_init_tracefs(void) { struct dentry *d_tracer; trace_access_lock_init(); d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; event_trace_init(); init_tracer_tracefs(&global_trace, d_tracer); ftrace_init_tracefs_toplevel(&global_trace, d_tracer); trace_create_file("tracing_thresh", 0644, d_tracer, &global_trace, &tracing_thresh_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", 0644, d_tracer, NULL, &tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", 0444, d_tracer, NULL, &tracing_saved_tgids_fops); trace_eval_init(); trace_create_eval_file(d_tracer); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); 
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

/* Panic notifier: dump the ftrace buffer if ftrace_dump_on_oops is set. */
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

/* Die notifier: same dump as the panic handler, but only on DIE_OOPS. */
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val, void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

/* Print the contents of a trace_seq to the console and reset it. */
void trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid.
	 */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

/* Initialize an iterator over the global trace array (used by ftrace_dump). */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

/*
 * Dump the ftrace ring buffer(s) to the console.  Called from the
 * panic/die notifiers and sysrq; serialized via dump_running so only
 * one dump runs at a time.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* disable recording on every CPU while we walk the buffers */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

/*
 * Split @buf into an argv vector and hand it to @createfn.
 * Returns 0 or the value returned by @createfn; -ENOMEM on split failure.
 */
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE 4096

/*
 * Copy a user-space buffer in WRITE_BUFSIZE chunks, split it into
 * newline-terminated lines, and run @createfn on each line.
 * Returns the number of bytes consumed, or a negative errno.
 */
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp =
			      strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

/*
 * Allocate the global trace buffers and bootstrap the tracer core.
 * Called once from early_trace_init(); on any failure every resource
 * acquired so far is unwound via the goto chain at the bottom.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

	/* error unwind: release in reverse order of acquisition */
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

/* Early boot: set up tracepoint_printk support and the trace buffers. */
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

/* Later boot stage: initialize the trace event subsystem. */
void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/* Switch the default trace clock to "global" on unstable sched clocks. */
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       " \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif
/*
 * NOTE(review): concatenation artifact — the two tokens below are dataset
 * metadata (a corpus file path and a corpus id), not C source.  They are
 * preserved inside this comment so the surrounding text stays well-formed:
 *   ./CrossVul/dataset_final_sorted/CWE-416/c/bad_820_5
 *   crossvul-cpp_data_good_2851_0
 * Everything above this point is a chunk of kernel/trace/trace.c; everything
 * below is a chunk of sound/core/seq/seq_clientmgr.c.
 */
/* * ALSA sequencer Client Manager * Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl> * Jaroslav Kysela <perex@perex.cz> * Takashi Iwai <tiwai@suse.de> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/minors.h> #include <linux/kmod.h> #include <sound/seq_kernel.h> #include "seq_clientmgr.h" #include "seq_memory.h" #include "seq_queue.h" #include "seq_timer.h" #include "seq_info.h" #include "seq_system.h" #include <sound/seq_device.h> #ifdef CONFIG_COMPAT #include <linux/compat.h> #endif /* Client Manager * this module handles the connections of userland and kernel clients * */ /* * There are four ranges of client numbers (last two shared): * 0..15: global clients * 16..127: statically allocated client numbers for cards 0..27 * 128..191: dynamically allocated client numbers for cards 28..31 * 128..191: dynamically allocated client numbers for applications */ /* number of kernel non-card clients */ #define SNDRV_SEQ_GLOBAL_CLIENTS 16 /* clients per cards, for static clients */ #define SNDRV_SEQ_CLIENTS_PER_CARD 4 /* dynamically allocated client numbers (both kernel drivers and user space) */ #define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN 128 #define SNDRV_SEQ_LFLG_INPUT 0x0001 #define 
SNDRV_SEQ_LFLG_OUTPUT	0x0002
#define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)

static DEFINE_SPINLOCK(clients_lock);
static DEFINE_MUTEX(register_mutex);

/*
 * client table
 * clienttablock[i] is non-zero while slot i is being torn down, so the
 * slot can not be reused until the teardown completes.
 */
static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
static struct snd_seq_usage client_usage;

/*
 * prototypes
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop);
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop);

/*
 */

/* Map a file's read/write mode onto the sequencer open flags. */
static inline unsigned short snd_seq_file_flags(struct file *file)
{
	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_WRITE:
		return SNDRV_SEQ_LFLG_OUTPUT;
	case FMODE_READ:
		return SNDRV_SEQ_LFLG_INPUT;
	default:
		return SNDRV_SEQ_LFLG_OPEN;
	}
}

/* non-zero when the client's output (write) pool has been allocated */
static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}

/* return pointer to client structure for specified id */
static struct snd_seq_client *clientptr(int clientid)
{
	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
			 clientid);
		return NULL;
	}
	return clienttab[clientid];
}

/*
 * Look up a client by id and take its use lock; the caller must drop it
 * with snd_seq_client_unlock().  May try to load the backing module when
 * called from process context.  Returns NULL if the client is absent or
 * its slot is locked for teardown.
 */
struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	unsigned long flags;
	struct snd_seq_client *client;

	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
			 clientid);
		return NULL;
	}
	spin_lock_irqsave(&clients_lock, flags);
	client = clientptr(clientid);
	if (client)
		goto __lock;
	if (clienttablock[clientid]) {
		spin_unlock_irqrestore(&clients_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
	if (!in_interrupt()) {
		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
		static char card_requested[SNDRV_CARDS];
		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
			int idx;

			/* request the module only once per client id */
			if (!client_requested[clientid]) {
				client_requested[clientid] = 1;
				for (idx = 0; idx < 15; idx++) {
					if (seq_client_load[idx] < 0)
						break;
					if (seq_client_load[idx] == clientid) {
						request_module("snd-seq-client-%i",
							       clientid);
						break;
					}
				}
			}
		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
				SNDRV_SEQ_CLIENTS_PER_CARD;
			if (card < snd_ecards_limit) {
				if (! card_requested[card]) {
					card_requested[card] = 1;
					snd_request_card(card);
				}
				snd_seq_device_load_drivers();
			}
		}
		/* retry the lookup now that the module may be loaded */
		spin_lock_irqsave(&clients_lock, flags);
		client = clientptr(clientid);
		if (client)
			goto __lock;
		spin_unlock_irqrestore(&clients_lock, flags);
	}
#endif
	return NULL;

      __lock:
	snd_use_lock_use(&client->use_lock);
	spin_unlock_irqrestore(&clients_lock, flags);
	return client;
}

/* bump the usage counter, tracking the peak value */
static void usage_alloc(struct snd_seq_usage *res, int num)
{
	res->cur += num;
	if (res->cur > res->peak)
		res->peak = res->cur;
}

static void usage_free(struct snd_seq_usage *res, int num)
{
	res->cur -= num;
}

/* initialise data structures */
int __init client_init_data(void)
{
	/* zap out the client table */
	memset(&clienttablock, 0, sizeof(clienttablock));
	memset(&clienttab, 0, sizeof(clienttab));
	return 0;
}

/*
 * Allocate a client structure and claim a slot in the client table.
 * client_index < 0 means "pick a dynamic slot".  Returns NULL when no
 * slot is free or on allocation failure.
 */
static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
	unsigned long flags;
	int c;
	struct snd_seq_client *client;

	/* init client data */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return NULL;
	client->pool = snd_seq_pool_new(poolsize);
	if
	    (client->pool == NULL) {
		kfree(client);
		return NULL;
	}
	client->type = NO_CLIENT;
	snd_use_lock_init(&client->use_lock);
	rwlock_init(&client->ports_lock);
	mutex_init(&client->ports_mutex);
	INIT_LIST_HEAD(&client->ports_list_head);

	/* find free slot in the client table */
	spin_lock_irqsave(&clients_lock, flags);
	if (client_index < 0) {
		/* dynamic slot: search the user/application range */
		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
		     c < SNDRV_SEQ_MAX_CLIENTS;
		     c++) {
			if (clienttab[c] || clienttablock[c])
				continue;
			clienttab[client->number = c] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	} else {
		if (clienttab[client_index] == NULL
		    && !clienttablock[client_index]) {
			clienttab[client->number = client_index] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	}
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_pool_delete(&client->pool);
	kfree(client);
	return NULL;	/* no free slot found or busy, return failure code */
}

/*
 * Tear down a client: delete its ports, leave all queues, and free its
 * pool.  The table slot is held in clienttablock while other users of
 * the client drain (snd_use_lock_sync), so the id can not be reused
 * during teardown.
 */
static int seq_free_client1(struct snd_seq_client *client)
{
	unsigned long flags;

	if (!client)
		return 0;
	snd_seq_delete_all_ports(client);
	snd_seq_queue_client_leave(client->number);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 1;
	clienttab[client->number] = NULL;
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_use_lock_sync(&client->use_lock);
	snd_seq_queue_client_termination(client->number);
	if (client->pool)
		snd_seq_pool_delete(&client->pool);
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 0;
	spin_unlock_irqrestore(&clients_lock, flags);
	return 0;
}

/* Free a client under register_mutex, dispatching on its type. */
static void seq_free_client(struct snd_seq_client * client)
{
	mutex_lock(&register_mutex);
	switch (client->type) {
	case NO_CLIENT:
		pr_warn("ALSA: seq: Trying to free unused client %d\n",
			client->number);
		break;

	case USER_CLIENT:
	case KERNEL_CLIENT:
		seq_free_client1(client);
		usage_free(&client_usage, 1);
		break;

	default:
		pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n",
		       client->number, client->type);
	}
	mutex_unlock(&register_mutex);
	snd_seq_system_client_ev_client_exit(client->number);
}

/* -------------------------------------------------------- */

/* create a user client */
static int snd_seq_open(struct inode *inode, struct file *file)
{
	int c, mode;			/* client id */
	struct snd_seq_client *client;
	struct snd_seq_user_client *user;
	int err;

	err = nonseekable_open(inode, file);
	if (err < 0)
		return err;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;
	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;	/* failure code */
	}

	mode = snd_seq_file_flags(file);
	if (mode & SNDRV_SEQ_LFLG_INPUT)
		client->accept_input = 1;
	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
		client->accept_output = 1;

	user = &client->data.user;
	user->fifo = NULL;
	user->fifo_pool_size = 0;

	if (mode & SNDRV_SEQ_LFLG_INPUT) {
		/* readers need an input FIFO */
		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
		if (user->fifo == NULL) {
			seq_free_client1(client);
			kfree(client);
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
	}

	usage_alloc(&client_usage, 1);
	client->type = USER_CLIENT;
	mutex_unlock(&register_mutex);

	c = client->number;
	file->private_data = client;

	/* fill client data */
	user->file = file;
	sprintf(client->name, "Client-%d", c);
	client->data.user.owner = get_pid(task_pid(current));

	/* make others aware this new client */
	snd_seq_system_client_ev_client_start(c);

	return 0;
}

/* delete a user client */
static int snd_seq_release(struct inode *inode, struct file *file)
{
	struct snd_seq_client *client = file->private_data;

	if (client) {
		seq_free_client(client);
		if (client->data.user.fifo)
			snd_seq_fifo_delete(&client->data.user.fifo);
		put_pid(client->data.user.owner);
		kfree(client);
	}

	return 0;
}

/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	not enough user-space buffer to write the whole
event
 *	-EFAULT	seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
		return -ENXIO;

	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}

	cell = NULL;
	err = 0;
	snd_seq_fifo_lock(fifo);

	/* while data available in queue */
	while (count >= sizeof(struct snd_seq_event)) {
		int nonblock;

		/* once something was read, never block for the rest */
		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
			break;
		}
		if (snd_seq_ev_is_variable(&cell->event)) {
			/* variable-length event: header first, then payload */
			struct snd_seq_event tmpev;
			tmpev = cell->event;
			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
			err = snd_seq_expand_var_event(&cell->event, count,
						       (char __force *)buf, 0,
						       sizeof(struct snd_seq_event));
			if (err < 0)
				break;
			result += err;
			count -= err;
			buf += err;
		} else {
			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
		}
		snd_seq_cell_free(cell);
		cell = NULL; /* to be sure */
		result += sizeof(struct snd_seq_event);
	}

	if (err < 0) {
		/* put back the cell we could not deliver */
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);

	return (err < 0) ?
	       err : result;
}

/*
 * check access permission to the port
 */
static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
{
	if ((port->capability & flags) != flags)
		return 0;
	return flags;
}

/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 * On success the client is returned locked; drop with
 * snd_seq_client_unlock().
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
						    int filter)
{
	struct snd_seq_client *dest;

	dest = snd_seq_client_use_ptr(event->dest.client);
	if (dest == NULL)
		return NULL;
	if (! dest->accept_input)
		goto __not_avail;
	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
	    ! test_bit(event->type, dest->event_filter))
		goto __not_avail;
	if (filter && !(dest->filter & filter))
		goto __not_avail;

	return dest; /* ok - accessible */
__not_avail:
	snd_seq_client_unlock(dest);
	return NULL;
}

/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kmalloc.
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop)
{
	struct snd_seq_event bounce_ev;
	int result;

	if (client == NULL ||
	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
	    !
	    client->accept_input)
		return 0; /* ignored */

	/* set up quoted error */
	memset(&bounce_ev, 0, sizeof(bounce_ev));
	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	bounce_ev.dest.client = client->number;
	bounce_ev.dest.port = event->source.port;
	bounce_ev.data.quote.origin = event->dest;
	bounce_ev.data.quote.event = event;
	bounce_ev.data.quote.value = -err; /* use positive value */
	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
	if (result < 0) {
		client->event_lost++;
		return result;
	}

	return result;
}

/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
				     int queue, int real_time)
{
	struct snd_seq_queue *q;

	q = queueptr(queue);
	if (! q)
		return 0;
	event->queue = queue;
	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
	if (real_time) {
		event->time.time = snd_seq_timer_get_cur_time(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
	} else {
		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
	}
	queuefree(q);
	return 1;
}

/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *		 <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;

	direct = snd_seq_ev_is_direct(event);

	dest = get_event_dest_client(event, filter);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;

	/* check permission */
	if (!
	    check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}

	if (dest_port->timestamping)
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);

	switch (dest->type) {
	case USER_CLIENT:
		if (dest->data.user.fifo)
			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
		break;

	case KERNEL_CLIENT:
		if (dest_port->event_input == NULL)
			break;
		result = dest_port->event_input(event, direct,
						dest_port->private_data,
						atomic, hop);
		break;
	default:
		break;
	}

  __skip:
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);

	if (result < 0 && !direct) {
		/* report the error back to the sender as a bounce event */
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}

/*
 * send the event to all subscribers:
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	struct snd_seq_subscribers *subs;
	int err, result = 0, num_ev = 0;
	struct snd_seq_event event_saved;
	struct snd_seq_client_port *src_port;
	struct snd_seq_port_subs_info *grp;

	src_port = snd_seq_port_use_ptr(client, event->source.port);
	if (src_port == NULL)
		return -EINVAL; /* invalid source port */
	/* save original event record */
	event_saved = *event;
	grp = &src_port->c_src;

	/* lock list */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read(&grp->list_mutex);
	list_for_each_entry(subs, &grp->list_head, src_list) {
		/* both ports ready?
		 */
		if (atomic_read(&subs->ref_count) != 2)
			continue;
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert time according to flag with subscription */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event,
						   0, atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev++;
		/* restore original event record */
		*event = event_saved;
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	*event = event_saved; /* restore */
	snd_seq_port_unlock(src_port);
	return (result < 0) ? result : num_ev;
}

#ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 */
static int port_broadcast_event(struct snd_seq_client *client,
				struct snd_seq_event *event,
				int atomic, int hop)
{
	int num_ev = 0, err, result = 0;
	struct snd_seq_client *dest_client;
	struct snd_seq_client_port *port;

	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
	if (dest_client == NULL)
		return 0; /* no matching destination */

	read_lock(&dest_client->ports_lock);
	list_for_each_entry(port, &dest_client->ports_list_head, list) {
		event->dest.port = port->addr.port;
		/* pass NULL as source client to avoid error bounce */
		err = snd_seq_deliver_single_event(NULL, event,
						   SNDRV_SEQ_FILTER_BROADCAST,
						   atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev++;
	}
	read_unlock(&dest_client->ports_lock);
	snd_seq_client_unlock(dest_client);
	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
	return (result < 0) ? result : num_ev;
}

/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event, int atomic, int hop)
{
	int err, result = 0, num_ev = 0;
	int dest;
	struct snd_seq_addr addr;

	addr = event->dest; /* save */

	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
		/* don't send to itself */
		if (dest == client->number)
			continue;
		event->dest.client = dest;
		event->dest.port = addr.port;
		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
			err = port_broadcast_event(client, event, atomic, hop);
		else
			/* pass NULL as source client to avoid error bounce */
			err = snd_seq_deliver_single_event(NULL, event,
							   SNDRV_SEQ_FILTER_BROADCAST,
							   atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev += err;
	}
	event->dest = addr; /* restore */
	return (result < 0) ? result : num_ev;
}

/* multicast - not supported yet */
static int multicast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event,
			   int atomic, int hop)
{
	pr_debug("ALSA: seq: multicast not supported yet.\n");
	return 0; /* ignored */
}
#endif /* SUPPORT_BROADCAST */

/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
				 int atomic, int hop)
{
	int result;

	/* limit the delivery depth to avoid infinite forwarding loops */
	hop++;
	if (hop >= SNDRV_SEQ_MAX_HOPS) {
		pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
			 event->source.client, event->source.port,
			 event->dest.client, event->dest.port);
		return -EMLINK;
	}

	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = broadcast_event(client, event, atomic, hop);
	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		result = multicast_event(client, event, atomic, hop);
	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = port_broadcast_event(client, event, atomic, hop);
#endif
	else
		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);

	return result;
}

/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or after enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *		 n == 0 : the event was not passed to any client.
 *		 n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;

	if (snd_BUG_ON(!cell))
		return -EINVAL;

	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}

	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;

		/* reserve this event to enqueue note-off later:
		 * deliver a NOTE-ON copy now, then rewrite the cell in place
		 * as the matching NOTE-OFF */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			ev->time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms; fold any nsec overflow
			 * into seconds and renormalize */
			ev->time.time.tv_nsec += 1000000 *
						 (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;

		/* Now queue this cell as the note off event; on enqueue
		 * failure the cell is freed here (no retry) */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */

	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}

	snd_seq_client_unlock(client);
	return result;
}


/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	} else
#ifdef SUPPORT_BROADCAST
		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
			event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
		}
#endif
	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}

	/* direct event processing without enqueued */
	if (snd_seq_ev_is_direct(event)) {
		/* NOTE events carry a duration and must go through the queue
		 * to generate the note-off; reject direct delivery */
		if (event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}

	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */

	/* allocate an event cell; non-blocking if caller asked or we are in
	 * atomic context */
	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
	if (err < 0)
		return err;

	/* we got a cell. enqueue it. */
	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
		snd_seq_cell_free(cell);
		return err;
	}

	return 0;
}


/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
	switch (snd_seq_ev_length_type(ev)) {
	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
		if (snd_seq_ev_is_variable_type(ev))
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
		if (!
		    snd_seq_ev_is_variable_type(ev) ||
		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
			return -EINVAL;
		break;
	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
		/* VARUSR data lives in the caller's address space, so it is
		 * only valid for direct (non-queued) delivery */
		if (! snd_seq_ev_is_direct(ev))
			return -EINVAL;
		break;
	}
	return 0;
}


/* handle write() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOMEM	malloc failed
 *	-EFAULT	seg. fault during copy from user space
 *	-EINVAL	invalid event
 *	-EAGAIN	no space in output pool
 *	-EINTR	interrupts while sleep
 *	-EMLINK	too long delivery path (too many hops)
 *	others	depends on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	int written = 0, len;
	int err = -EINVAL;
	struct snd_seq_event event;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
		return -ENXIO;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_output || client->pool == NULL)
		return -ENXIO;

	/* allocate the pool now if the pool is not allocated yet */
	/* NOTE(review): this lazy init can race with a concurrent
	 * SET_CLIENT_POOL ioctl resizing the pool; upstream later serialized
	 * both paths with client->ioctl_mutex — confirm for this tree. */
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		if (snd_seq_pool_init(client->pool) < 0)
			return -ENOMEM;
	}

	/* only process whole events */
	while (count >= sizeof(struct snd_seq_event)) {
		/* Read in the event header from the user */
		len = sizeof(event);
		if (copy_from_user(&event, buf, len)) {
			err = -EFAULT;
			break;
		}
		event.source.client = client->number;	/* fill in client number */
		/* Check for extension data length */
		if (check_event_type_and_length(&event)) {
			err = -EINVAL;
			break;
		}

		/* check for special events */
		if (event.type == SNDRV_SEQ_EVENT_NONE)
			goto __skip_event;
		else if (snd_seq_ev_is_reserved(&event)) {
			err = -EINVAL;
			break;
		}

		if (snd_seq_ev_is_variable(&event)) {
			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
			if ((size_t)(extlen + len) > count) {
				/* back out, will get an error this time or next */
				err = -EINVAL;
				break;
			}
			/* set user space pointer */
			event.data.ext.len =
			    extlen | SNDRV_SEQ_EXT_USRPTR;
			/* the variable-length payload follows the header in
			 * the user buffer; it is copied lazily on delivery */
			event.data.ext.ptr = (char __force *)buf
						+ sizeof(struct snd_seq_event);
			len += extlen; /* increment data length */
		} else {
#ifdef CONFIG_COMPAT
			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
				void *ptr = (void __force *)compat_ptr(event.data.raw32.d[1]);
				event.data.ext.ptr = ptr;
			}
#endif
		}

		/* ok, enqueue it */
		err = snd_seq_client_enqueue_event(client, &event, file,
						   !(file->f_flags & O_NONBLOCK),
						   0, 0);
		if (err < 0)
			break;

	__skip_event:
		/* Update pointers and counts */
		count -= len;
		buf += len;
		written += len;
	}

	/* return partial progress if any event went through */
	return written ? written : err;
}


/*
 * handle polling
 */
static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
{
	struct snd_seq_client *client = file->private_data;
	unsigned int mask = 0;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
	    client->data.user.fifo) {

		/* check if data is available in the outqueue */
		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
			mask |= POLLIN | POLLRDNORM;
	}

	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {

		/* check if data is available in the pool */
		if (!snd_seq_write_pool_allocated(client) ||
		    snd_seq_pool_poll_wait(client->pool, file, wait))
			mask |= POLLOUT | POLLWRNORM;
	}

	return mask;
}


/*-----------------------------------------------------*/

/* protocol version query */
static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
{
	int *pversion = arg;

	*pversion = SNDRV_SEQ_VERSION;
	return 0;
}

/* return the caller's own client number */
static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
{
	int *client_id = arg;

	*client_id = client->number;
	return 0;
}

/* SYSTEM_INFO ioctl() */
static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_system_info *info = arg;

	memset(info, 0, sizeof(*info));
	/* fill the info fields */
	info->queues = SNDRV_SEQ_MAX_QUEUES;
	info->clients = SNDRV_SEQ_MAX_CLIENTS;
	info->ports = SNDRV_SEQ_MAX_PORTS;
	info->channels = 256;
	/* fixed limit */
	info->cur_clients = client_usage.cur;
	info->cur_queues = snd_seq_queue_get_cur_queues();
	return 0;
}


/* RUNNING_MODE ioctl():
 * validates endianness/word size compatibility between caller and kernel
 * and flags the target client for 32-bit conversion when needed.
 */
static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_running_info *info = arg;
	struct snd_seq_client *cptr;
	int err = 0;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(info->client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!! */

#ifdef SNDRV_BIG_ENDIAN
	if (!info->big_endian) {
		err = -EINVAL;
		goto __err;
	}
#else
	if (info->big_endian) {
		err = -EINVAL;
		goto __err;
	}
#endif
	if (info->cpu_mode > sizeof(long)) {
		err = -EINVAL;
		goto __err;
	}
	cptr->convert32 = (info->cpu_mode < sizeof(long));
 __err:
	snd_seq_client_unlock(cptr);
	return err;
}

/* CLIENT_INFO ioctl() */
/* fill a snd_seq_client_info from the client struct (caller holds the
 * client reference) */
static void get_client_info(struct snd_seq_client *cptr,
			    struct snd_seq_client_info *info)
{
	info->client = cptr->number;

	/* fill the info fields */
	info->type = cptr->type;
	strcpy(info->name, cptr->name);
	info->filter = cptr->filter;
	info->event_lost = cptr->event_lost;
	memcpy(info->event_filter, cptr->event_filter, 32);
	info->num_ports = cptr->num_ports;
	if (cptr->type == USER_CLIENT)
		info->pid = pid_vnr(cptr->data.user.owner);
	else
		info->pid = -1;
	if (cptr->type == KERNEL_CLIENT)
		info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1;
	else
		info->card = -1;
	memset(info->reserved, 0, sizeof(info->reserved));
}

static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_info *client_info = arg;
	struct snd_seq_client *cptr;

	/* requested client number */
	cptr = snd_seq_client_use_ptr(client_info->client);
	if (cptr == NULL)
		return -ENOENT;		/* don't change !!!
					 */

	get_client_info(cptr, client_info);
	snd_seq_client_unlock(cptr);

	return 0;
}


/* CLIENT_INFO ioctl():
 * a client may only update its own info, and may not change its type.
 */
static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_client_info *client_info = arg;

	/* it is not allowed to set the info fields for an another client */
	if (client->number != client_info->client)
		return -EPERM;
	/* also client type must be set now */
	if (client->type != client_info->type)
		return -EINVAL;

	/* fill the info fields */
	if (client_info->name[0])
		strlcpy(client->name, client_info->name, sizeof(client->name));

	client->filter = client_info->filter;
	client->event_lost = client_info->event_lost;
	memcpy(client->event_filter, client_info->event_filter, 32);

	return 0;
}


/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client_port *port;
	struct snd_seq_port_callback *callback;
	int port_idx;

	/* it is not allowed to create the port for an another client */
	if (info->addr.client != client->number)
		return -EPERM;

	port = snd_seq_create_port(client, (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ?
				   info->addr.port : -1);
	if (port == NULL)
		return -ENOMEM;

	/* a user client must not smuggle in kernel callbacks; tear the port
	 * back down if it tried */
	if (client->type == USER_CLIENT && info->kernel) {
		port_idx = port->addr.port;
		snd_seq_port_unlock(port);
		snd_seq_delete_port(client, port_idx);
		return -EINVAL;
	}
	if (client->type == KERNEL_CLIENT) {
		if ((callback = info->kernel) != NULL) {
			if (callback->owner)
				port->owner = callback->owner;
			port->private_data = callback->private_data;
			port->private_free = callback->private_free;
			port->event_input = callback->event_input;
			port->c_src.open = callback->subscribe;
			port->c_src.close = callback->unsubscribe;
			port->c_dest.open = callback->use;
			port->c_dest.close = callback->unuse;
		}
	}

	info->addr = port->addr;

	snd_seq_set_port_info(port, info);
	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
	snd_seq_port_unlock(port);

	return 0;
}

/*
 * DELETE PORT ioctl()
 */
static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	int err;

	/* it is not allowed to remove the port for an another client */
	if (info->addr.client != client->number)
		return -EPERM;

	err = snd_seq_delete_port(client, info->addr.port);
	if (err >= 0)
		snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
	return err;
}


/*
 * GET_PORT_INFO ioctl() (on any client)
 */
static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	port = snd_seq_port_use_ptr(cptr, info->addr.port);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;			/* don't change */
	}

	/* get port info */
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}


/*
 * SET_PORT_INFO ioctl() (only ports on this/own client)
 */
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client_port *port;

	if (info->addr.client != client->number) /* only set our own ports ! */
		return -EPERM;
	port = snd_seq_port_use_ptr(client, info->addr.port);
	if (port) {
		snd_seq_set_port_info(port, info);
		snd_seq_port_unlock(port);
	}
	/* a missing port is silently ignored (returns success) */
	return 0;
}


/*
 * port subscription (connection)
 */
#define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
#define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)

/* Verify the requesting client may connect sport -> dport:
 * third-party connections additionally require both ports to allow export,
 * and read/write capability checks are skipped for the client's own ports.
 */
static int check_subscription_permission(struct snd_seq_client *client,
					 struct snd_seq_client_port *sport,
					 struct snd_seq_client_port *dport,
					 struct snd_seq_port_subscribe *subs)
{
	if (client->number != subs->sender.client &&
	    client->number != subs->dest.client) {
		/* connection by third client - check export permission */
		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
			return -EPERM;
	}

	/* check read permission */
	/* if sender or receiver is the subscribing client itself,
	 * no permission check is necessary
	 */
	if (client->number != subs->sender.client) {
		if (! check_port_perm(sport, PERM_RD))
			return -EPERM;
	}
	/* check write permission */
	if (client->number != subs->dest.client) {
		if (! check_port_perm(dport, PERM_WR))
			return -EPERM;
	}
	return 0;
}

/*
 * send a subscription notify event to user client:
 * client must be user client.
 */
int snd_seq_client_notify_subscription(int client, int port,
				       struct snd_seq_port_subscribe *info,
				       int evtype)
{
	struct snd_seq_event event;

	memset(&event, 0, sizeof(event));
	event.type = evtype;
	event.data.connect.dest = info->dest;
	event.data.connect.sender = info->sender;

	return snd_seq_system_notify(client, port, &event);  /* non-atomic */
}


/*
 * add to port's subscription list IOCTL interface
 */
static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result = -EINVAL;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;

	/* take references on both clients and both ports; every exit path
	 * funnels through __end to drop whatever was acquired */
	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, subs);
	if (result < 0)
		goto __end;

	/* connect them */
	result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
	if (!
	    result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
      __end:
	/* release refs in reverse acquisition order */
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}


/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;

	/* same acquire/release discipline as subscribe_port above */
	if ((receiver = snd_seq_client_use_ptr(subs->dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs->dest.port)) == NULL)
		goto __end;

	result = check_subscription_permission(client, sport, dport, subs);
	if (result < 0)
		goto __end;

	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
	if (!
	    result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}


/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	info->queue = q->queue;
	info->locked = q->locked;
	info->owner = q->owner;

	/* set queue name */
	if (!info->name[0])
		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
	strlcpy(q->name, info->name, sizeof(q->name));
	/* drop the use-lock taken by snd_seq_queue_alloc() */
	snd_use_lock_free(&q->use_lock);

	return 0;
}

/* DELETE_QUEUE ioctl() */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_info *info = arg;

	return snd_seq_queue_delete(client->number, info->queue);
}

/* GET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = queueptr(info->queue);
	if (q == NULL)
		return -EINVAL;

	memset(info, 0, sizeof(*info));
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	strlcpy(info->name, q->name, sizeof(info->name));
	queuefree(q);

	return 0;
}

/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	if (info->owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info->queue, client->number)) {
		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
			return -EPERM;
		if (info->locked)
			snd_seq_queue_use(info->queue, client->number, 1);
	}
	else {
		return -EPERM;
	}

	q = queueptr(info->queue);
	if (! q)
		return -EINVAL;
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info->name, sizeof(q->name));
	queuefree(q);

	return 0;
}

/* GET_NAMED_QUEUE ioctl() */
static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_info *info = arg;
	struct snd_seq_queue *q;

	q = snd_seq_queue_find_name(info->name);
	if (q == NULL)
		return -EINVAL;
	info->queue = q->queue;
	info->owner = q->owner;
	info->locked = q->locked;
	queuefree(q);

	return 0;
}

/* GET_QUEUE_STATUS ioctl() */
static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_status *status = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(status->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(status, 0, sizeof(*status));
	status->queue = queue->queue;

	tmr = queue->timer;
	status->events = queue->tickq->cells + queue->timeq->cells;

	status->time = snd_seq_timer_get_cur_time(tmr);
	status->tick = snd_seq_timer_get_cur_tick(tmr);
	status->running = tmr->running;

	status->flags = queue->flags;
	queuefree(queue);

	return 0;
}

/* GET_QUEUE_TEMPO ioctl() */
static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(tempo->queue);
	if (queue == NULL)
		return -EINVAL;
	memset(tempo, 0, sizeof(*tempo));
	tempo->queue = queue->queue;

	tmr = queue->timer;

	tempo->tempo = tmr->tempo;
	tempo->ppq = tmr->ppq;
	tempo->skew_value = tmr->skew;
	tempo->skew_base = tmr->skew_base;
	queuefree(queue);

	return 0;
}


/* SET_QUEUE_TEMPO ioctl()
 * exported helper: also usable by kernel clients.
 */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
{
	if (!snd_seq_queue_check_access(tempo->queue, client))
		return -EPERM;
	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}

EXPORT_SYMBOL(snd_seq_set_queue_tempo);

static int
snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_queue_tempo *tempo = arg;
	int result;

	result = snd_seq_set_queue_tempo(client->number, tempo);
	/* collapse positive results to 0 for the ioctl ABI */
	return result < 0 ? result : 0;
}


/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(timer->queue);
	if (queue == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	memset(timer, 0, sizeof(*timer));
	timer->queue = queue->queue;

	timer->type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		timer->u.alsa.id = tmr->alsa_id;
		timer->u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	return 0;
}


/* SET_QUEUE_TIMER ioctl():
 * replaces the queue's timer source; only ALSA timers are accepted and the
 * caller must have access to the queue.
 */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_queue_timer *timer = arg;
	int result = 0;

	if (timer->type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer->queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer->queue);
		if (q == NULL)
			return -ENXIO;
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		/* close the old timer before switching parameters */
		snd_seq_queue_timer_close(timer->queue);
		tmr->type = timer->type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer->u.alsa.id;
			tmr->preferred_resolution = timer->u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer->queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}


/* GET_QUEUE_CLIENT ioctl() */
static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_queue_client *info = arg;
	int used;

	used = snd_seq_queue_is_used(info->queue, client->number);
	if (used < 0)
		return -EINVAL;
	info->used
= used; info->client = client->number; return 0; } /* SET_QUEUE_CLIENT ioctl() */ static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_client *info = arg; int err; if (info->used >= 0) { err = snd_seq_queue_use(info->queue, client->number, info->used); if (err < 0) return err; } return snd_seq_ioctl_get_queue_client(client, arg); } /* GET_CLIENT_POOL ioctl() */ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client, void *arg) { struct snd_seq_client_pool *info = arg; struct snd_seq_client *cptr; cptr = snd_seq_client_use_ptr(info->client); if (cptr == NULL) return -ENOENT; memset(info, 0, sizeof(*info)); info->client = cptr->number; info->output_pool = cptr->pool->size; info->output_room = cptr->pool->room; info->output_free = info->output_pool; info->output_free = snd_seq_unused_cells(cptr->pool); if (cptr->type == USER_CLIENT) { info->input_pool = cptr->data.user.fifo_pool_size; info->input_free = info->input_pool; if (cptr->data.user.fifo) info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool); } else { info->input_pool = 0; info->input_free = 0; } snd_seq_client_unlock(cptr); return 0; } /* SET_CLIENT_POOL ioctl() */ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, void *arg) { struct snd_seq_client_pool *info = arg; int rc; if (client->number != info->client) return -EINVAL; /* can't change other clients */ if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS && (! 
	     snd_seq_write_pool_allocated(client) ||
	     info->output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* remove all existing cells */
			snd_seq_pool_mark_closing(client->pool);
			snd_seq_queue_client_leave_cells(client->number);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info->output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info->input_pool >= 1 &&
	    info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info->input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info->input_pool;
	}
	if (info->output_room >= 1 &&
	    info->output_room <= client->pool->size) {
		client->pool->room  = info->output_room;
	}

	/* report the resulting state back to the caller */
	return snd_seq_ioctl_get_client_pool(client, arg);
}


/* REMOVE_EVENTS ioctl() */
static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
				       void *arg)
{
	struct snd_seq_remove_events *info = arg;

	/*
	 * Input mostly not implemented XXX.
	 */
	if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
		/*
		 * No restrictions so for a user client we can clear
		 * the whole fifo
		 */
		if (client->type == USER_CLIENT && client->data.user.fifo)
			snd_seq_fifo_clear(client->data.user.fifo);
	}

	if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
		snd_seq_queue_remove_cells(client->number, info);

	return 0;
}


/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_subscribers *p;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
		goto __end;
	p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
	if (p) {
		result = 0;
		/* NOTE(review): *p is copied without holding the subscribers
		 * list lock; later upstream kernels copy the info under the
		 * group rwsem — confirm whether this tree needs that fix. */
		*subs = p->info;
	} else
		result = -ENOENT;

      __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);
	return result;
}


/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_query_subs *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if ((cptr = snd_seq_client_use_ptr(subs->root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs->root.port)) == NULL)
		goto __end;

	switch (subs->type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs->num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs->index) {
			/* found!
			 */
			struct snd_seq_subscribers *s;
			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs->addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs->addr = s->info.sender;
			}
			subs->flags = s->info.flags;
			subs->queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
   	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);

	return result;
}


/*
 * query next client: scans upward from info->client + 1 for the first
 * existing client and returns its info.
 */
static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
					   void *arg)
{
	struct snd_seq_client_info *info = arg;
	struct snd_seq_client *cptr = NULL;

	/* search for next client */
	info->client++;
	if (info->client < 0)
		info->client = 0;
	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
		cptr = snd_seq_client_use_ptr(info->client);
		if (cptr)
			break; /* found */
	}
	if (cptr == NULL)
		return -ENOENT;

	get_client_info(cptr, info);
	snd_seq_client_unlock(cptr);

	return 0;
}

/*
 * query next port: returns the nearest port at or above info->addr.port + 1
 * on the given client.
 */
static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
					 void *arg)
{
	struct snd_seq_port_info *info = arg;
	struct snd_seq_client *cptr;
	struct snd_seq_client_port *port = NULL;

	cptr = snd_seq_client_use_ptr(info->addr.client);
	if (cptr == NULL)
		return -ENXIO;

	/* search for next port */
	info->addr.port++;
	port = snd_seq_port_query_nearest(cptr, info);
	if (port == NULL) {
		snd_seq_client_unlock(cptr);
		return -ENOENT;
	}

	/* get port info */
	info->addr = port->addr;
	snd_seq_get_port_info(port, info);
	snd_seq_port_unlock(port);
	snd_seq_client_unlock(cptr);

	return 0;
}

/* -------------------------------------------------------- */

/* ioctl dispatch table: maps each ioctl command to its handler; terminated
 * by a { 0, NULL } sentinel */
static const struct ioctl_handler {
	unsigned int cmd;
	int (*func)(struct snd_seq_client *client, void *arg);
} ioctl_handlers[] = {
	{ SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
	{ SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
	{ SNDRV_SEQ_IOCTL_RUNNING_MODE,
	  snd_seq_ioctl_running_mode },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
	{ 0, NULL },
};

/* unified ioctl entry point: looks up the handler, copies the argument in,
 * runs the handler on a kernel-stack buffer, and copies the result out */
static long snd_seq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	/* To use kernel stack
	   for ioctl data. */
	union {
		int pversion;
		int client_id;
		struct snd_seq_system_info	system_info;
		struct snd_seq_running_info	running_info;
		struct snd_seq_client_info	client_info;
		struct snd_seq_port_info	port_info;
		struct snd_seq_port_subscribe	port_subscribe;
		struct snd_seq_queue_info	queue_info;
		struct snd_seq_queue_status	queue_status;
		struct snd_seq_queue_tempo	tempo;
		struct snd_seq_queue_timer	queue_timer;
		struct snd_seq_queue_client	queue_client;
		struct snd_seq_client_pool	client_pool;
		struct snd_seq_remove_events	remove_events;
		struct snd_seq_query_subs	query_subs;
	} buf;
	const struct ioctl_handler *handler;
	unsigned long size;
	int err;

	if (snd_BUG_ON(!client))
		return -ENXIO;

	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
		if (handler->cmd == cmd)
			break;
	}
	if (handler->cmd == 0)
		return -ENOTTY;

	memset(&buf, 0, sizeof(buf));

	/*
	 * All of ioctl commands for ALSA sequencer get an argument of size
	 * within 13 bits. We can safely pick up the size from the command.
	 */
	size = _IOC_SIZE(handler->cmd);
	if (handler->cmd & IOC_IN) {
		if (copy_from_user(&buf, (const void __user *)arg, size))
			return -EFAULT;
	}

	err = handler->func(client, &buf);
	if (err >= 0) {
		/* Some commands includes a bug in 'dir' field. */
		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
		    handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
		    (handler->cmd & IOC_OUT))
			if (copy_to_user((void __user *)arg, &buf, size))
				return -EFAULT;
	}

	return err;
}

#ifdef CONFIG_COMPAT
#include "seq_compat.c"
#else
#define snd_seq_ioctl_compat	NULL
#endif

/* -------------------------------------------------------- */


/* exported to kernel modules */
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
				 const char *name_fmt, ...)
{ struct snd_seq_client *client; va_list args; if (snd_BUG_ON(in_interrupt())) return -EBUSY; if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD) return -EINVAL; if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS) return -EINVAL; if (mutex_lock_interruptible(&register_mutex)) return -ERESTARTSYS; if (card) { client_index += SNDRV_SEQ_GLOBAL_CLIENTS + card->number * SNDRV_SEQ_CLIENTS_PER_CARD; if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) client_index = -1; } /* empty write queue as default */ client = seq_create_client1(client_index, 0); if (client == NULL) { mutex_unlock(&register_mutex); return -EBUSY; /* failure code */ } usage_alloc(&client_usage, 1); client->accept_input = 1; client->accept_output = 1; client->data.kernel.card = card; va_start(args, name_fmt); vsnprintf(client->name, sizeof(client->name), name_fmt, args); va_end(args); client->type = KERNEL_CLIENT; mutex_unlock(&register_mutex); /* make others aware this new client */ snd_seq_system_client_ev_client_start(client->number); /* return client number to caller */ return client->number; } EXPORT_SYMBOL(snd_seq_create_kernel_client); /* exported to kernel modules */ int snd_seq_delete_kernel_client(int client) { struct snd_seq_client *ptr; if (snd_BUG_ON(in_interrupt())) return -EBUSY; ptr = clientptr(client); if (ptr == NULL) return -EINVAL; seq_free_client(ptr); kfree(ptr); return 0; } EXPORT_SYMBOL(snd_seq_delete_kernel_client); /* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue * and snd_seq_kernel_client_enqueue_blocking */ static int kernel_client_enqueue(int client, struct snd_seq_event *ev, struct file *file, int blocking, int atomic, int hop) { struct snd_seq_client *cptr; int result; if (snd_BUG_ON(!ev)) return -EINVAL; if (ev->type == SNDRV_SEQ_EVENT_NONE) return 0; /* ignore this */ if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR) return -EINVAL; /* quoted events can't be enqueued */ /* fill in client number */ ev->source.client = client; if 
(check_event_type_and_length(ev)) return -EINVAL; cptr = snd_seq_client_use_ptr(client); if (cptr == NULL) return -EINVAL; if (! cptr->accept_output) result = -EPERM; else /* send it */ result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop); snd_seq_client_unlock(cptr); return result; } /* * exported, called by kernel clients to enqueue events (w/o blocking) * * RETURN VALUE: zero if succeed, negative if error */ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev, int atomic, int hop) { return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop); } EXPORT_SYMBOL(snd_seq_kernel_client_enqueue); /* * exported, called by kernel clients to enqueue events (with blocking) * * RETURN VALUE: zero if succeed, negative if error */ int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev, struct file *file, int atomic, int hop) { return kernel_client_enqueue(client, ev, file, 1, atomic, hop); } EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking); /* * exported, called by kernel clients to dispatch events directly to other * clients, bypassing the queues. Event time-stamp will be updated. * * RETURN VALUE: negative = delivery failed, * zero, or positive: the number of delivered events */ int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev, int atomic, int hop) { struct snd_seq_client *cptr; int result; if (snd_BUG_ON(!ev)) return -EINVAL; /* fill in client number */ ev->queue = SNDRV_SEQ_QUEUE_DIRECT; ev->source.client = client; if (check_event_type_and_length(ev)) return -EINVAL; cptr = snd_seq_client_use_ptr(client); if (cptr == NULL) return -EINVAL; if (!cptr->accept_output) result = -EPERM; else result = snd_seq_deliver_event(cptr, ev, atomic, hop); snd_seq_client_unlock(cptr); return result; } EXPORT_SYMBOL(snd_seq_kernel_client_dispatch); /** * snd_seq_kernel_client_ctl - operate a command for a client with data in * kernel space. * @clientid: A numerical ID for a client. 
* @cmd: An ioctl(2) command for ALSA sequencer operation. * @arg: A pointer to data in kernel space. * * Against its name, both kernel/application client can be handled by this * kernel API. A pointer of 'arg' argument should be in kernel space. * * Return: 0 at success. Negative error code at failure. */ int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg) { const struct ioctl_handler *handler; struct snd_seq_client *client; client = clientptr(clientid); if (client == NULL) return -ENXIO; for (handler = ioctl_handlers; handler->cmd > 0; ++handler) { if (handler->cmd == cmd) return handler->func(client, arg); } pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)); return -ENOTTY; } EXPORT_SYMBOL(snd_seq_kernel_client_ctl); /* exported (for OSS emulator) */ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait) { struct snd_seq_client *client; client = clientptr(clientid); if (client == NULL) return -ENXIO; if (! snd_seq_write_pool_allocated(client)) return 1; if (snd_seq_pool_poll_wait(client->pool, file, wait)) return 1; return 0; } EXPORT_SYMBOL(snd_seq_kernel_client_write_poll); /*---------------------------------------------------------------------------*/ #ifdef CONFIG_SND_PROC_FS /* * /proc interface */ static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer, struct snd_seq_port_subs_info *group, int is_src, char *msg) { struct list_head *p; struct snd_seq_subscribers *s; int count = 0; down_read(&group->list_mutex); if (list_empty(&group->list_head)) { up_read(&group->list_mutex); return; } snd_iprintf(buffer, msg); list_for_each(p, &group->list_head) { if (is_src) s = list_entry(p, struct snd_seq_subscribers, src_list); else s = list_entry(p, struct snd_seq_subscribers, dest_list); if (count++) snd_iprintf(buffer, ", "); snd_iprintf(buffer, "%d:%d", is_src ? s->info.dest.client : s->info.sender.client, is_src ? 
s->info.dest.port : s->info.sender.port); if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue); if (group->exclusive) snd_iprintf(buffer, "[ex]"); } up_read(&group->list_mutex); snd_iprintf(buffer, "\n"); } #define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-') #define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-') #define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e') #define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-') static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer, struct snd_seq_client *client) { struct snd_seq_client_port *p; mutex_lock(&client->ports_mutex); list_for_each_entry(p, &client->ports_list_head, list) { snd_iprintf(buffer, " Port %3d : \"%s\" (%c%c%c%c)\n", p->addr.port, p->name, FLAG_PERM_RD(p->capability), FLAG_PERM_WR(p->capability), FLAG_PERM_EX(p->capability), FLAG_PERM_DUPLEX(p->capability)); snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, " Connecting To: "); snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, " Connected From: "); } mutex_unlock(&client->ports_mutex); } /* exported to seq_info.c */ void snd_seq_info_clients_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int c; struct snd_seq_client *client; snd_iprintf(buffer, "Client info\n"); snd_iprintf(buffer, " cur clients : %d\n", client_usage.cur); snd_iprintf(buffer, " peak clients : %d\n", client_usage.peak); snd_iprintf(buffer, " max clients : %d\n", SNDRV_SEQ_MAX_CLIENTS); snd_iprintf(buffer, "\n"); /* list the client table */ for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) { client = snd_seq_client_use_ptr(c); if (client == NULL) continue; if (client->type == NO_CLIENT) { snd_seq_client_unlock(client); continue; } snd_iprintf(buffer, "Client %3d : 
\"%s\" [%s]\n", c, client->name, client->type == USER_CLIENT ? "User" : "Kernel"); snd_seq_info_dump_ports(buffer, client); if (snd_seq_write_pool_allocated(client)) { snd_iprintf(buffer, " Output pool :\n"); snd_seq_info_pool(buffer, client->pool, " "); } if (client->type == USER_CLIENT && client->data.user.fifo && client->data.user.fifo->pool) { snd_iprintf(buffer, " Input pool :\n"); snd_seq_info_pool(buffer, client->data.user.fifo->pool, " "); } snd_seq_client_unlock(client); } } #endif /* CONFIG_SND_PROC_FS */ /*---------------------------------------------------------------------------*/ /* * REGISTRATION PART */ static const struct file_operations snd_seq_f_ops = { .owner = THIS_MODULE, .read = snd_seq_read, .write = snd_seq_write, .open = snd_seq_open, .release = snd_seq_release, .llseek = no_llseek, .poll = snd_seq_poll, .unlocked_ioctl = snd_seq_ioctl, .compat_ioctl = snd_seq_ioctl_compat, }; static struct device seq_dev; /* * register sequencer device */ int __init snd_sequencer_device_init(void) { int err; snd_device_initialize(&seq_dev, NULL); dev_set_name(&seq_dev, "seq"); if (mutex_lock_interruptible(&register_mutex)) return -ERESTARTSYS; err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0, &snd_seq_f_ops, NULL, &seq_dev); if (err < 0) { mutex_unlock(&register_mutex); put_device(&seq_dev); return err; } mutex_unlock(&register_mutex); return 0; } /* * unregister sequencer device */ void __exit snd_sequencer_device_done(void) { snd_unregister_device(&seq_dev); put_device(&seq_dev); }
./CrossVul/dataset_final_sorted/CWE-416/c/good_2851_0
crossvul-cpp_data_good_888_0
/* MDIO Bus interface * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_gpio.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/irq.h> #define CREATE_TRACE_POINTS #include <trace/events/mdio.h> #include "mdio-boardinfo.h" static int mdiobus_register_gpiod(struct mdio_device *mdiodev) { struct gpio_desc *gpiod = NULL; /* Deassert the optional reset signal */ if (mdiodev->dev.of_node) gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, "reset-gpios", 0, GPIOD_OUT_LOW, "PHY reset"); if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS) gpiod = NULL; else if (IS_ERR(gpiod)) return PTR_ERR(gpiod); mdiodev->reset = gpiod; /* Assert the reset signal again */ mdio_device_reset(mdiodev, 1); return 0; } int mdiobus_register_device(struct mdio_device *mdiodev) { int err; if (mdiodev->bus->mdio_map[mdiodev->addr]) return -EBUSY; if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) { err = mdiobus_register_gpiod(mdiodev); if (err) return err; } mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; return 0; } EXPORT_SYMBOL(mdiobus_register_device); int 
mdiobus_unregister_device(struct mdio_device *mdiodev) { if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) return -EINVAL; mdiodev->bus->mdio_map[mdiodev->addr] = NULL; return 0; } EXPORT_SYMBOL(mdiobus_unregister_device); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) { struct mdio_device *mdiodev = bus->mdio_map[addr]; if (!mdiodev) return NULL; if (!(mdiodev->flags & MDIO_DEVICE_FLAG_PHY)) return NULL; return container_of(mdiodev, struct phy_device, mdio); } EXPORT_SYMBOL(mdiobus_get_phy); bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) { return bus->mdio_map[addr]; } EXPORT_SYMBOL(mdiobus_is_registered_device); /** * mdiobus_alloc_size - allocate a mii_bus structure * @size: extra amount of memory to allocate for private storage. * If non-zero, then bus->priv is points to that memory. * * Description: called by a bus driver to allocate an mii_bus * structure to fill in. */ struct mii_bus *mdiobus_alloc_size(size_t size) { struct mii_bus *bus; size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN); size_t alloc_size; int i; /* If we alloc extra space, it should be aligned */ if (size) alloc_size = aligned_size + size; else alloc_size = sizeof(*bus); bus = kzalloc(alloc_size, GFP_KERNEL); if (!bus) return NULL; bus->state = MDIOBUS_ALLOCATED; if (size) bus->priv = (void *)bus + aligned_size; /* Initialise the interrupts to polling */ for (i = 0; i < PHY_MAX_ADDR; i++) bus->irq[i] = PHY_POLL; return bus; } EXPORT_SYMBOL(mdiobus_alloc_size); static void _devm_mdiobus_free(struct device *dev, void *res) { mdiobus_free(*(struct mii_bus **)res); } static int devm_mdiobus_match(struct device *dev, void *res, void *data) { struct mii_bus **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } /** * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() * @dev: Device to allocate mii_bus for * @sizeof_priv: Space to allocate for private structure. * * Managed mdiobus_alloc_size. 
mii_bus allocated with this function is * automatically freed on driver detach. * * If an mii_bus allocated with this function needs to be freed separately, * devm_mdiobus_free() must be used. * * RETURNS: * Pointer to allocated mii_bus on success, NULL on failure. */ struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) { struct mii_bus **ptr, *bus; ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; /* use raw alloc_dr for kmalloc caller tracing */ bus = mdiobus_alloc_size(sizeof_priv); if (bus) { *ptr = bus; devres_add(dev, ptr); } else { devres_free(ptr); } return bus; } EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size); /** * devm_mdiobus_free - Resource-managed mdiobus_free() * @dev: Device this mii_bus belongs to * @bus: the mii_bus associated with the device * * Free mii_bus allocated with devm_mdiobus_alloc_size(). */ void devm_mdiobus_free(struct device *dev, struct mii_bus *bus) { int rc; rc = devres_release(dev, _devm_mdiobus_free, devm_mdiobus_match, bus); WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_mdiobus_free); /** * mdiobus_release - mii_bus device release callback * @d: the target struct device that contains the mii_bus * * Description: called when the last reference to an mii_bus is * dropped, to free the underlying memory. */ static void mdiobus_release(struct device *d) { struct mii_bus *bus = to_mii_bus(d); BUG_ON(bus->state != MDIOBUS_RELEASED && /* for compatibility with error handling in drivers */ bus->state != MDIOBUS_ALLOCATED); kfree(bus); } static struct class mdio_bus_class = { .name = "mdio_bus", .dev_release = mdiobus_release, }; #if IS_ENABLED(CONFIG_OF_MDIO) /* Helper function for of_mdio_find_bus */ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) { return dev->of_node == mdio_bus_np; } /** * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. * * Returns a reference to the mii_bus, or NULL if none found. 
The * embedded struct device will have its reference count incremented, * and this must be put once the bus is finished with. * * Because the association of a device_node and mii_bus is made via * of_mdiobus_register(), the mii_bus cannot be found before it is * registered with of_mdiobus_register(). * */ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) { struct device *d; if (!mdio_bus_np) return NULL; d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np, of_mdio_bus_match); return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); /* Walk the list of subnodes of a mdio bus and look for a node that * matches the mdio device's address with its 'reg' property. If * found, set the of_node pointer for the mdio device. This allows * auto-probed phy devices to be supplied with information passed in * via DT. */ static void of_mdiobus_link_mdiodev(struct mii_bus *bus, struct mdio_device *mdiodev) { struct device *dev = &mdiodev->dev; struct device_node *child; if (dev->of_node || !bus->dev.of_node) return; for_each_available_child_of_node(bus->dev.of_node, child) { int addr; addr = of_mdio_parse_addr(dev, child); if (addr < 0) continue; if (addr == mdiodev->addr) { dev->of_node = child; dev->fwnode = of_fwnode_handle(child); return; } } } #else /* !IS_ENABLED(CONFIG_OF_MDIO) */ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, struct mdio_device *mdiodev) { } #endif /** * mdiobus_create_device_from_board_info - create a full MDIO device given * a mdio_board_info structure * @bus: MDIO bus to create the devices on * @bi: mdio_board_info structure describing the devices * * Returns 0 on success or < 0 on error. 
*/ static int mdiobus_create_device(struct mii_bus *bus, struct mdio_board_info *bi) { struct mdio_device *mdiodev; int ret = 0; mdiodev = mdio_device_create(bus, bi->mdio_addr); if (IS_ERR(mdiodev)) return -ENODEV; strncpy(mdiodev->modalias, bi->modalias, sizeof(mdiodev->modalias)); mdiodev->bus_match = mdio_device_bus_match; mdiodev->dev.platform_data = (void *)bi->platform_data; ret = mdio_device_register(mdiodev); if (ret) mdio_device_free(mdiodev); return ret; } /** * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus * @owner: module containing bus accessor functions * * Description: Called by a bus driver to bring up all the PHYs * on a given bus, and attach them to the bus. Drivers should use * mdiobus_register() rather than __mdiobus_register() unless they * need to pass a specific owner module. MDIO devices which are not * PHYs will not be brought up by this function. They are expected to * to be explicitly listed in DT and instantiated by of_mdiobus_register(). * * Returns 0 on success or < 0 on error. 
*/ int __mdiobus_register(struct mii_bus *bus, struct module *owner) { struct mdio_device *mdiodev; int i, err; struct gpio_desc *gpiod; if (NULL == bus || NULL == bus->name || NULL == bus->read || NULL == bus->write) return -EINVAL; BUG_ON(bus->state != MDIOBUS_ALLOCATED && bus->state != MDIOBUS_UNREGISTERED); bus->owner = owner; bus->dev.parent = bus->parent; bus->dev.class = &mdio_bus_class; bus->dev.groups = NULL; dev_set_name(&bus->dev, "%s", bus->id); err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); return -EINVAL; } mutex_init(&bus->mdio_lock); /* de-assert bus level PHY GPIO reset */ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", bus->id); device_del(&bus->dev); return PTR_ERR(gpiod); } else if (gpiod) { bus->reset_gpiod = gpiod; gpiod_set_value_cansleep(gpiod, 1); udelay(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); } if (bus->reset) bus->reset(bus); for (i = 0; i < PHY_MAX_ADDR; i++) { if ((bus->phy_mask & (1 << i)) == 0) { struct phy_device *phydev; phydev = mdiobus_scan(bus, i); if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) { err = PTR_ERR(phydev); goto error; } } } mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; pr_info("%s: probed\n", bus->name); return 0; error: while (--i >= 0) { mdiodev = bus->mdio_map[i]; if (!mdiodev) continue; mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } /* Put PHYs in RESET to save power */ if (bus->reset_gpiod) gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); return err; } EXPORT_SYMBOL(__mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { struct mdio_device *mdiodev; int i; BUG_ON(bus->state != MDIOBUS_REGISTERED); bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { mdiodev = bus->mdio_map[i]; if (!mdiodev) continue; if (mdiodev->reset) 
gpiod_put(mdiodev->reset); mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } /* Put PHYs in RESET to save power */ if (bus->reset_gpiod) gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); } EXPORT_SYMBOL(mdiobus_unregister); /** * mdiobus_free - free a struct mii_bus * @bus: mii_bus to free * * This function releases the reference to the underlying device * object in the mii_bus. If this is the last reference, the mii_bus * will be freed. */ void mdiobus_free(struct mii_bus *bus) { /* For compatibility with error handling in drivers. */ if (bus->state == MDIOBUS_ALLOCATED) { kfree(bus); return; } BUG_ON(bus->state != MDIOBUS_UNREGISTERED); bus->state = MDIOBUS_RELEASED; put_device(&bus->dev); } EXPORT_SYMBOL(mdiobus_free); /** * mdiobus_scan - scan a bus for MDIO devices. * @bus: mii_bus to scan * @addr: address on bus to scan * * This function scans the MDIO bus, looking for devices which can be * identified using a vendor/product ID in registers 2 and 3. Not all * MDIO devices have such registers, but PHY devices typically * do. Hence this function assumes anything found is a PHY, or can be * treated as a PHY. Other MDIO devices, such as switches, will * probably not be found during the scan. */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { struct phy_device *phydev; int err; phydev = get_phy_device(bus, addr, false); if (IS_ERR(phydev)) return phydev; /* * For DT, see if the auto-probed phy has a correspoding child * in the bus node, and set the of_node pointer in this case. */ of_mdiobus_link_mdiodev(bus, &phydev->mdio); err = phy_device_register(phydev); if (err) { phy_device_free(phydev); return ERR_PTR(-ENODEV); } return phydev; } EXPORT_SYMBOL(mdiobus_scan); /** * __mdiobus_read - Unlocked version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * Read a MDIO bus register. Caller must hold the mdio bus lock. 
* * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) { int retval; WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); retval = bus->read(bus, addr, regnum); trace_mdio_access(bus, 1, addr, regnum, retval, retval); return retval; } EXPORT_SYMBOL(__mdiobus_read); /** * __mdiobus_write - Unlocked version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * Write a MDIO bus register. Caller must hold the mdio bus lock. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); err = bus->write(bus, addr, regnum, val); trace_mdio_access(bus, 0, addr, regnum, val, err); return err; } EXPORT_SYMBOL(__mdiobus_write); /** * mdiobus_read_nested - Nested version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) { int retval; BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_read_nested); /** * mdiobus_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
*/ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) { int retval; BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_read); /** * mdiobus_write_nested - Nested version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_write_nested); /** * mdiobus_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_write); /** * mdio_bus_match - determine if given MDIO driver supports the given * MDIO device * @dev: target MDIO device * @drv: given MDIO driver * * Description: Given a MDIO device, and a MDIO driver, return 1 if * the driver supports the device. Otherwise, return 0. This may * require calling the devices own match function, since different classes * of MDIO devices have different match criteria. 
*/ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { struct mdio_device *mdio = to_mdio_device(dev); if (of_driver_match_device(dev, drv)) return 1; if (mdio->bus_match) return mdio->bus_match(dev, drv); return 0; } static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) { int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; return 0; } struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .uevent = mdio_uevent, }; EXPORT_SYMBOL(mdio_bus_type); int __init mdio_bus_init(void) { int ret; ret = class_register(&mdio_bus_class); if (!ret) { ret = bus_register(&mdio_bus_type); if (ret) class_unregister(&mdio_bus_class); } return ret; } EXPORT_SYMBOL_GPL(mdio_bus_init); #if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) { class_unregister(&mdio_bus_class); bus_unregister(&mdio_bus_type); } EXPORT_SYMBOL_GPL(mdio_bus_exit); #else module_init(mdio_bus_init); /* no module_exit, intentional */ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MDIO bus/device layer"); #endif
./CrossVul/dataset_final_sorted/CWE-416/c/good_888_0
crossvul-cpp_data_bad_5489_0
/* * Functions related to mapping data to requests */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/uio.h> #include "blk.h" /* * Append a bio to a passthrough request. Only works can be merged into * the request based on the driver constraints. */ int blk_rq_append_bio(struct request *rq, struct bio *bio) { if (!rq->bio) { blk_rq_bio_prep(rq->q, rq, bio); } else { if (!ll_back_merge_fn(rq->q, rq, bio)) return -EINVAL; rq->biotail->bi_next = bio; rq->biotail = bio; rq->__data_len += bio->bi_iter.bi_size; } return 0; } EXPORT_SYMBOL(blk_rq_append_bio); static int __blk_rq_unmap_user(struct bio *bio) { int ret = 0; if (bio) { if (bio_flagged(bio, BIO_USER_MAPPED)) bio_unmap_user(bio); else ret = bio_uncopy_user(bio); } return ret; } static int __blk_rq_map_user_iov(struct request *rq, struct rq_map_data *map_data, struct iov_iter *iter, gfp_t gfp_mask, bool copy) { struct request_queue *q = rq->q; struct bio *bio, *orig_bio; int ret; if (copy) bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); else bio = bio_map_user_iov(q, iter, gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); if (map_data && map_data->null_mapped) bio_set_flag(bio, BIO_NULL_MAPPED); iov_iter_advance(iter, bio->bi_iter.bi_size); if (map_data) map_data->offset += bio->bi_iter.bi_size; orig_bio = bio; blk_queue_bounce(q, &bio); /* * We link the bounce buffer in and could have to traverse it * later so we have to get a ref to prevent it from being freed */ bio_get(bio); ret = blk_rq_append_bio(rq, bio); if (ret) { bio_endio(bio); __blk_rq_unmap_user(orig_bio); bio_put(bio); return ret; } return 0; } /** * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage * @q: request queue where request should be inserted * @rq: request to map data to * @map_data: pointer to the rq_map_data holding pages (if necessary) * @iter: iovec iterator * @gfp_mask: memory allocation flags * * Description: * Data will be mapped 
directly for zero copy I/O, if possible. Otherwise * a kernel bounce buffer is used. * * A matching blk_rq_unmap_user() must be issued at the end of I/O, while * still in process context. * * Note: The mapped bio may need to be bounced through blk_queue_bounce() * before being submitted to the device, as pages mapped may be out of * reach. It's the callers responsibility to make sure this happens. The * original bio must be passed back in to blk_rq_unmap_user() for proper * unmapping. */ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) { bool copy = false; unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); struct bio *bio = NULL; struct iov_iter i; int ret; if (map_data) copy = true; else if (iov_iter_alignment(iter) & align) copy = true; else if (queue_virt_boundary(q)) copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); i = *iter; do { ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); if (ret) goto unmap_rq; if (!bio) bio = rq->bio; } while (iov_iter_count(&i)); if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; return 0; unmap_rq: __blk_rq_unmap_user(bio); rq->bio = NULL; return -EINVAL; } EXPORT_SYMBOL(blk_rq_map_user_iov); int blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) { struct iovec iov; struct iov_iter i; int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i); if (unlikely(ret < 0)) return ret; return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); } EXPORT_SYMBOL(blk_rq_map_user); /** * blk_rq_unmap_user - unmap a request with user data * @bio: start of bio list * * Description: * Unmap a rq previously mapped by blk_rq_map_user(). The caller must * supply the original rq->bio from the blk_rq_map_user() return, since * the I/O completion may have changed rq->bio. 
*/ int blk_rq_unmap_user(struct bio *bio) { struct bio *mapped_bio; int ret = 0, ret2; while (bio) { mapped_bio = bio; if (unlikely(bio_flagged(bio, BIO_BOUNCED))) mapped_bio = bio->bi_private; ret2 = __blk_rq_unmap_user(mapped_bio); if (ret2 && !ret) ret = ret2; mapped_bio = bio; bio = bio->bi_next; bio_put(mapped_bio); } return ret; } EXPORT_SYMBOL(blk_rq_unmap_user); /** * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage * @q: request queue where request should be inserted * @rq: request to fill * @kbuf: the kernel buffer * @len: length of user data * @gfp_mask: memory allocation flags * * Description: * Data will be mapped directly if possible. Otherwise a bounce * buffer is used. Can be called multiple times to append multiple * buffers. */ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) { int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; int do_copy = 0; struct bio *bio; int ret; if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len || !kbuf) return -EINVAL; do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); if (do_copy) bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); else bio = bio_map_kern(q, kbuf, len, gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); if (!reading) bio_set_op_attrs(bio, REQ_OP_WRITE, 0); if (do_copy) rq->cmd_flags |= REQ_COPY_USER; ret = blk_rq_append_bio(rq, bio); if (unlikely(ret)) { /* request is too big */ bio_put(bio); return ret; } blk_queue_bounce(q, &rq->bio); return 0; } EXPORT_SYMBOL(blk_rq_map_kern);
./CrossVul/dataset_final_sorted/CWE-416/c/bad_5489_0
crossvul-cpp_data_bad_282_0
/* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/completion.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <rdma/rdma_user_cm.h> #include <rdma/ib_marshall.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, .maxlen = sizeof max_backlog, .mode = 0644, .proc_handler = proc_dointvec, }, { } }; struct ucma_file { struct mutex mut; struct file *filp; struct list_head ctx_list; struct list_head event_list; wait_queue_head_t poll_wait; struct workqueue_struct *close_wq; }; struct ucma_context { int id; struct completion comp; atomic_t ref; int events_reported; int backlog; struct ucma_file *file; struct rdma_cm_id *cm_id; u64 uid; struct list_head list; struct list_head mc_list; /* mark that device is in process of destroying the internal HW * resources, protected by the global mut */ int closing; /* sync between removal event and id destroy, protected by file mut */ int destroying; struct work_struct close_work; }; struct ucma_multicast { struct ucma_context *ctx; int id; int events_reported; u64 uid; u8 join_state; struct list_head list; struct sockaddr_storage addr; }; struct ucma_event { struct ucma_context *ctx; struct ucma_multicast *mc; struct list_head list; struct rdma_cm_id *cm_id; struct rdma_ucm_event_resp resp; struct work_struct close_work; }; static DEFINE_MUTEX(mut); static DEFINE_IDR(ctx_idr); static DEFINE_IDR(multicast_idr); static inline struct ucma_context 
*_ucma_find_context(int id, struct ucma_file *file)
{
	struct ucma_context *ctx;

	/*
	 * Caller must hold the global 'mut'.  A context whose cm_id is not
	 * yet set (still being created) is deliberately invisible here.
	 */
	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

/*
 * Look up a context by id and take a reference on it.  Fails with -EIO if
 * the context is already being torn down by the removal-event path.
 * Pair every successful return with ucma_put_ctx().
 */
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

/*
 * Drop a context reference.  The last put completes ctx->comp, which the
 * destroy/close paths wait on before calling rdma_destroy_id().
 */
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

/*
 * Workqueue callback: destroy the cm_id attached to an orphaned connect
 * request event that was detached by ucma_removal_event_handler().
 */
static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close =  container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

/*
 * Workqueue callback: release the HW resources of a context whose device is
 * going away.  Runs on file->close_wq, queued by the removal-event handler.
 */
static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx =  container_of(work, struct ucma_context, close_work);

	/* once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive till its explicit destroying
	 * by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id.
*/ rdma_destroy_id(ctx->cm_id); } static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) { struct ucma_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); atomic_set(&ctx->ref, 1); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; mutex_lock(&mut); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (ctx->id < 0) goto error; list_add_tail(&ctx->list, &file->ctx_list); return ctx; error: kfree(ctx); return NULL; } static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc; mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) return NULL; mutex_lock(&mut); mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (mc->id < 0) goto error; mc->ctx = ctx; list_add_tail(&mc->list, &ctx->mc_list); return mc; error: kfree(mc); return NULL; } static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, struct rdma_conn_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; dst->responder_resources =src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; } static void ucma_copy_ud_event(struct ib_device *device, struct rdma_ucm_ud_param *dst, struct rdma_ud_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr); dst->qp_num = src->qp_num; dst->qkey = src->qkey; } static void ucma_set_event_context(struct ucma_context *ctx, struct rdma_cm_event *event, struct ucma_event *uevent) { uevent->ctx = ctx; switch (event->event) { case 
RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		/*
		 * For multicast events the rdma_cm layer hands our
		 * ucma_multicast back through ud.private_data (we passed it
		 * as the join context); report its uid/id, not the ctx's.
		 */
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	/* Explicit destroy already in progress; it owns the teardown. */
	if (ctx->destroying)
		return;

	/* only if context is pointing to cm_id that it owns it and can be
	 * queued to be closed, otherwise that cm_id is an inflight one that
	 * is part of that context event list pending to be detached and
	 * reattached to its new context as part of ucma_get_event,
	 * handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	/*
	 * The dying cm_id belongs to a not-yet-accepted connect request:
	 * pull its event off the list and destroy the id asynchronously.
	 */
	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

/*
 * rdma_cm event callback: queue the event for delivery to user space via
 * ucma_get_event().  A non-zero return for CONNECT_REQUEST tells the cm
 * layer to destroy the new child id (we could not allocate an event).
 */
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; kfree(uevent); goto out; } ctx->backlog--; } else if (!ctx->uid || ctx->cm_id != cm_id) { /* * We ignore events for new connections until userspace has set * their context. This can only happen if an error occurs on a * new connection before the user accepts it. This is okay, * since the accept will just fail later. However, we do need * to release the underlying HW resources in case of a device * removal event. */ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); kfree(uevent); goto out; } list_add_tail(&uevent->list, &ctx->file->event_list); wake_up_interruptible(&ctx->file->poll_wait); if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) ucma_removal_event_handler(cm_id); out: mutex_unlock(&ctx->file->mut); return ret; } static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct ucma_context *ctx; struct rdma_ucm_get_event cmd; struct ucma_event *uevent; int ret = 0; /* * Old 32 bit user space does not send the 4 byte padding in the * reserved field. We don't care, allow it to keep working. 
 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Block (unless O_NONBLOCK) until an event is queued on this file. */
	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	/*
	 * A connect request carries a brand-new child cm_id; wrap it in a
	 * fresh ucma_context now so user space gets an id to accept/reject.
	 * The listener's backlog slot is released at the same time.
	 */
	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	/* Event delivered: unlink it and bump the reported counters. */
	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

/*
 * Map a user-requested RDMA port space to the QP type it implies.
 * Returns 0 on success, -EINVAL for an unknown port space.
 */
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		/* NOTE(review): qp_type comes from user space here; presumably
		 * validated downstream by the verbs layer — confirm. */
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * RDMA_USER_CM_CMD_CREATE_ID: allocate a ucma_context plus underlying
 * rdma_cm_id and return the new context id to user space.  ctx->cm_id is
 * assigned only after the response is copied out, so the context stays
 * invisible to _ucma_find_context() until fully constructed.
 */
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if
(IS_ERR(cm_id)) { ret = PTR_ERR(cm_id); goto err1; } resp.id = ctx->id; if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { ret = -EFAULT; goto err2; } ctx->cm_id = cm_id; return 0; err2: rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); mutex_lock(&file->mut); list_del(&ctx->list); mutex_unlock(&file->mut); kfree(ctx); return ret; } static void ucma_cleanup_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc, *tmp; mutex_lock(&mut); list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { list_del(&mc->list); idr_remove(&multicast_idr, mc->id); kfree(mc); } mutex_unlock(&mut); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { if (uevent->mc != mc) continue; list_del(&uevent->list); kfree(uevent); } } /* * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At * this point, no new events will be reported from the hardware. However, we * still need to cleanup the UCMA context for this ID. Specifically, there * might be events that have not yet been consumed by the user space software. * These might include pending connect requests which we have not completed * processing. We cannot call rdma_destroy_id while holding the lock of the * context (file->mut), as it might cause a deadlock. We therefore extract all * relevant events from the context pending events list while holding the * mutex. After that we release them as needed. */ static int ucma_free_ctx(struct ucma_context *ctx) { int events_reported; struct ucma_event *uevent, *tmp; LIST_HEAD(list); ucma_cleanup_multicast(ctx); /* Cleanup events not yet reported to the user. 
 */
	mutex_lock(&ctx->file->mut);
	/* Detach this context's pending events while holding file->mut ... */
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	/* ... then destroy them unlocked: rdma_destroy_id() may not be called
	 * under file->mut (deadlock with the event handler). */
	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

/*
 * RDMA_USER_CM_CMD_DESTROY_ID: tear down a context.  Ordering matters:
 * remove from the idr first (no new lookups), mark destroying (removal
 * handler backs off), flush close_wq (no inflight close work), then either
 * destroy the cm_id ourselves or let the already-queued close work do it.
 */
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it's guaranteed that there is no inflight
	 * closing task */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		/* Drop our creation ref, wait for all other refs, destroy. */
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		/* ucma_close_id() already owns the cm_id teardown. */
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

/*
 * RDMA_USER_CM_CMD_BIND_IP: legacy bind taking a sockaddr_in6-shaped
 * address; the size of the user-supplied address is validated before use.
 */
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user
*inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || !cmd.addr_size || cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_route cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = 
rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, (union ib_gid *)&resp->ib_route[0].dgid); rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); /* fall through */ case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); } static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; 
ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (!ctx->cm_id->device) goto out; resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; ucma_put_ctx(ctx); return ret; } static void ucma_query_device_addr(struct rdma_cm_id *cm_id, struct rdma_ucm_query_addr_resp *resp) { if (!cm_id->device) return; resp->node_guid = (__force __u64) cm_id->device->node_guid; resp->port_num = cm_id->port_num; resp->pkey = (__force __u16) cpu_to_be16( ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); } static ssize_t ucma_query_addr(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; resp.src_size = rdma_addr_size(addr); memcpy(&resp.src_addr, addr, resp.src_size); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; resp.dst_size = rdma_addr_size(addr); memcpy(&resp.dst_addr, addr, resp.dst_size); ucma_query_device_addr(ctx->cm_id, &resp); if (copy_to_user(response, &resp, 
sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query_path(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_path_resp *resp; int i, ret = 0; if (out_len < sizeof(*resp)) return -ENOSPC; resp = kzalloc(out_len, GFP_KERNEL); if (!resp) return -ENOMEM; resp->num_paths = ctx->cm_id->route.num_paths; for (i = 0, out_len -= sizeof(*resp); i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); i++, out_len -= sizeof(struct ib_path_rec_data)) { struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); } else { ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } if (copy_to_user(response, resp, sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) ret = -EFAULT; kfree(resp); return ret; } static ssize_t ucma_query_gid(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr_ib *addr; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; memset(&resp, 0, sizeof resp); ucma_query_device_addr(ctx->cm_id, &resp); addr = (struct sockaddr_ib *) &resp.src_addr; resp.src_size = sizeof(*addr); if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr, NULL); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.src_addr); } addr = (struct sockaddr_ib *) &resp.dst_addr; resp.dst_size = sizeof(*addr); if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force 
__be16) resp.pkey; rdma_read_gids(ctx->cm_id, NULL, (union ib_gid *)&addr->sib_addr); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr); } if (copy_to_user(response, &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_query(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct ucma_context *ctx; void __user *response; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; response = u64_to_user_ptr(cmd.response); ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_PATH: ret = ucma_query_path(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_GID: ret = ucma_query_gid(ctx, response, out_len); break; default: ret = -ENOSYS; break; } ucma_put_ctx(ctx); return ret; } static void ucma_copy_conn_param(struct rdma_cm_id *id, struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; dst->responder_resources =src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? 
src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_connect cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!cmd.conn_param.valid) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ret = rdma_connect(ctx->cm_id, &conn_param); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_listen cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? cmd.backlog : max_backlog; ret = rdma_listen(ctx->cm_id, ctx->backlog); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_accept cmd; struct rdma_conn_param conn_param; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); } else ret = __rdma_accept(ctx->cm_id, NULL, NULL); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_reject cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_reject(ctx->cm_id, 
cmd.private_data, cmd.private_data_len); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_disconnect cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_disconnect(ctx->cm_id); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_init_qp_attr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_init_qp_attr cmd; struct ib_uverbs_qp_attr resp; struct ucma_context *ctx; struct ib_qp_attr qp_attr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); if (ret) goto out; ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: ucma_put_ctx(ctx); return ret; } static int ucma_set_option_id(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret = 0; switch (optname) { case RDMA_OPTION_ID_TOS: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; case RDMA_OPTION_ID_REUSEADDR: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_AFONLY: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 
/*
 * NOTE(review): this chunk begins mid-function.  The tokens below are the
 * tail of ucma_set_option_id() (signature and earlier switch cases lie
 * before this chunk); they are reproduced untouched.
 */
1 : 0); break; default: ret = -ENOSYS; } return ret; }

/*
 * Install a user-supplied IB path record array on the context's cm_id.
 * Scans for the first record flagged GMP|PRIMARY|BIDIRECTIONAL, unpacks
 * it, converts to OPA form when the port is OPA-capable, and synthesizes
 * a ROUTE_RESOLVED event so userspace sees the route as ready.
 */
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	/* Buffer must hold a whole number of path records. */
	if (optlen % sizeof(*path_data))
		return -EINVAL;

	/* Find the first fully usable (primary, bidirectional) record. */
	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	/* A device must already be bound to the id to resolve a path. */
	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	/* OPA ports need the IB record converted to the OPA layout. */
	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

/* Dispatch RDMA_OPTION_IB-level options. */
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

/* Dispatch a set-option request by protocol level. */
static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

/*
 * RDMA_USER_CM_CMD_SET_OPTION handler: copy the option payload from
 * userspace and hand it to the per-level dispatcher.
 */
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Reject absurd lengths before memdup_user() reaches kmalloc. */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

/* Forward an event notification to the CM; requires a bound device. */
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}

/*
 * Common multicast-join path for the IP and generic-sockaddr entry points.
 *
 * NOTE(review): dataset labels this file CWE-416.  ucma_alloc_multicast()
 * (defined before this chunk — confirm) presumably publishes "mc" in
 * multicast_idr at allocation time; if so, a concurrent
 * ucma_leave_multicast() can idr_find() the half-built "mc" and race with
 * the err3/err2 teardown below, using "mc" while or after it is freed
 * here (use-after-free).  Upstream hardened this by inserting NULL at
 * alloc time and only idr_replace()-ing the real pointer after the join
 * fully succeeds — verify against ucma_alloc_multicast() before relying
 * on this path.
 */
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	/* Caller-declared size must match the sockaddr family's size. */
	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	/* Undo the join, then unpublish and free mc (see NOTE above). */
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

/* Legacy IPv4/IPv6-only join ABI: translate into the generic join cmd. */
static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	/* Size derived from the address family; 0 means invalid family. */
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

/* Generic sockaddr_storage join ABI; validates the family/size first. */
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

/*
 * Leave a multicast group: claim and unpublish "mc" atomically under the
 * global mutex (ownership check + ref grab via atomic_inc_not_zero, then
 * idr_remove), then tear it down outside the lock.
 */
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

/* Release in reverse of the acquisition order used above. */
static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

/* Move ctx's queued-but-undelivered events onto the destination file. */
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

/*
 * Re-home a cm id (and its pending events) from the file that currently
 * owns it onto this file, e.g. across fork/fd-passing.
 *
 * NOTE(review): f.file->private_data is used as a struct ucma_file *
 * without verifying that cmd.fd actually refers to a ucma character
 * device (f.file->f_op == &ucma_fops); a non-ucma fd type-confuses
 * private_data.  Upstream later added exactly such an fd-type check —
 * confirm against the deployed kernel.
 */
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

/* write() dispatch table, indexed by rdma_ucm_cmd_hdr.cmd. */
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] 	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};

/*
 * write() entry point: validate the command header (index bound, declared
 * input length fits the write) and dispatch through ucma_cmd_table.
 */
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	/* Refuse writers whose credentials changed since open() (CVE-style
	 * confused-deputy hardening via ib_safe_file_access()). */
	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
		       task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

/* poll(): readable whenever the per-file event queue is non-empty. */
static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	/* Ordered wq: context-close work must run one item at a time. */
	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

/* release(): tear down every context still owned by this file. */
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* At that step once ctx was marked as destroying and workqueue
		 * was flushed we are safe from any inflights handlers that
		 * might put other closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

/* /dev/infiniband/rdma_cm; world-writable by design for this ABI. */
static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

/* sysfs "abi_version" attribute: reports the userspace ABI level. */
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

/* Module init: misc device, abi_version attr, then sysctl table. */
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

/* Module exit: unwind ucma_init() and release both idr trees. */
static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);
./CrossVul/dataset_final_sorted/CWE-416/c/bad_282_0
crossvul-cpp_data_good_148_1
/* radare - LGPL - Copyright 2009-2018 - pancake, maijin */

/*
 * NOTE(review): this is the head of radare2's cmd_anal.c ('a' command
 * family).  It consists almost entirely of static help tables (triples of
 * command / args / description, NULL-terminated) consumed by the help
 * printer, followed by descriptor registration and two helpers; the file
 * continues past this chunk (type_cmd() below is truncated mid-statement
 * and is reproduced verbatim).
 */

#include "r_util.h"
#include "r_core.h"

/* Help table for the top-level 'a' command. */
static const char *help_msg_a[] = {
	"Usage:", "a", "[abdefFghoprxstc] [...]",
	"aa", "[?]", "analyze all (fcns + bbs) (aa0 to avoid sub renaming)",
	"a8", " [hexpairs]", "analyze bytes",
	"ab", "[b] [addr]", "analyze block at given address",
	"abb", " [len]", "analyze N basic blocks in [len] (section.size by default)",
	"ac", " [cycles]", "analyze which op could be executed in [cycles]",
	"ad", "[?]", "analyze data trampoline (wip)",
	"ad", " [from] [to]", "analyze data pointers to (from-to)",
	"ae", "[?] [expr]", "analyze opcode eval expression (see ao)",
	"af", "[?]", "analyze Functions",
	"aF", "", "same as above, but using anal.depth=1",
	"ag", "[?] [options]", "output Graphviz code",
	"ah", "[?]", "analysis hints (force opcode size, ...)",
	"ai", " [addr]", "address information (show perms, stack, heap, ...)",
	"an", " [name] [@addr]", "show/rename/create whatever flag/function is used at addr",
	"ao", "[?] [len]", "analyze Opcodes (or emulate it)",
	"aO", "[?] [len]", "Analyze N instructions in M bytes",
	"ap", "", "find prelude for current offset",
	"ar", "[?]", "like 'dr' but for the esil vm. (registers)",
	"as", "[?] [num]", "analyze syscall using dbg.reg",
	"av", "[?] [.]", "show vtables",
	"ax", "[?]", "manage refs/xrefs (see also afx?)",
	NULL };

static const char *help_msg_aa[] = {
	"Usage:", "aa[0*?]", " # see also 'af' and 'afna'",
	"aa", " ", "alias for 'af@@ sym.*;af@entry0;afva'", //;.afna @@ fcn.*'",
	"aa*", "", "analyze all flags starting with sym. (af @@ sym.*)",
	"aaa", "[?]", "autoname functions after aa (see afna)",
	"aab", "", "aab across io.sections.text",
	"aac", " [len]", "analyze function calls (af @@ `pi len~call[1]`)",
	"aac*", " [len]", "flag function calls without performing a complete analysis",
	"aad", " [len]", "analyze data references to code",
	"aae", " [len] ([addr])", "analyze references with ESIL (optionally to address)",
	"aaE", "", "run aef on all functions (same as aef @@f)",
	"aaf", " ", "analyze all functions (e anal.hasnext=1;afr @@c:isq)",
	"aai", "[j]", "show info of all analysis parameters",
	"aan", "", "autoname functions that either start with fcn.* or sym.func.*",
	"aap", "", "find and analyze function preludes",
	"aar", "[?] [len]", "analyze len bytes of instructions for references",
	"aas", " [len]", "analyze symbols (af @@= `isq~[0]`)",
	"aat", " [len]", "analyze all consecutive functions in section",
	"aaT", " [len]", "analyze code after trap-sleds",
	"aau", " [len]", "list mem areas (larger than len bytes) not covered by functions",
	"aav", " [sat]", "find values referencing a specific section or map",
	NULL };

static const char *help_msg_aar[] = {
	"Usage:", "aar", "[j*] [sz] # search and analyze xrefs",
	"aar", " [sz]", "analyze xrefs in current section or sz bytes of code",
	"aar*", " [sz]", "list found xrefs in radare commands format",
	"aarj", " [sz]", "list found xrefs in JSON format",
	NULL };

static const char *help_msg_ab[] = {
	"Usage:", "ab", "",
	"ab", " [addr]", "show basic block information at given address",
	"abb", " [length]", "analyze N bytes and extract basic blocks",
	"abj", "", "display basic block information in JSON",
	"abx", " [hexpair-bytes]", "analyze N bytes",
	NULL };

static const char *help_msg_ad[] = {
	"Usage:", "ad", "[kt] [...]",
	"ad", " [N] [D]", "analyze N data words at D depth",
	"ad4", " [N] [D]", "analyze N data words at D depth (asm.bits=32)",
	"ad8", " [N] [D]", "analyze N data words at D depth (asm.bits=64)",
	"adf", "", "analyze data in function (use like .adf @@=`afl~[0]`",
	"adfg", "", "analyze data in function gaps",
	"adt", "", "analyze data trampolines (wip)",
	"adk", "", "analyze data kind (code, text, data, invalid, ...)",
	NULL };

static const char *help_msg_ae[] = {
	"Usage:", "ae[idesr?] [arg]", "ESIL code emulation",
	"ae", " [expr]", "evaluate ESIL expression",
	"ae?", "", "show this help",
	"ae??", "", "show ESIL help",
	"ae[aA]", "[f] [count]", "analyse esil accesses (regs, mem..)",
	"aec", "[?]", "continue until ^C",
	"aecs", " [sn]", "continue until syscall number",
	"aecu", " [addr]", "continue until address",
	"aecue", " [esil]", "continue until esil expression match",
	"aef", " [addr]", "emulate function",
	"aei", "", "initialize ESIL VM state (aei- to deinitialize)",
	"aeim", " [addr] [size] [name]", "initialize ESIL VM stack (aeim- remove)",
	"aeip", "", "initialize ESIL program counter to curseek",
	"aek", " [query]", "perform sdb query on ESIL.info",
	"aek-", "", "resets the ESIL.info sdb instance",
	"aep", "[?] [addr]", "manage esil pin hooks",
	"aepc", " [addr]", "change esil PC to this address",
	"aer", " [..]", "handle ESIL registers like 'ar' or 'dr' does",
	"aets", "[?]", "ESIL Trace session",
	"aes", "", "perform emulated debugger step",
	"aesp", " [X] [N]", "evaluate N instr from offset X",
	"aesb", "", "step back",
	"aeso", " ", "step over",
	"aesu", " [addr]", "step until given address",
	"aesue", " [esil]", "step until esil expression match",
	"aetr", "[esil]", "Convert an ESIL Expression to REIL",
	"aex", " [hex]", "evaluate opcode expression",
	NULL };

/* ESIL language cheat-sheet shown by 'ae??'. */
static const char *help_detail_ae[] = {
	"Examples:", "ESIL", " examples and documentation",
	"+", "=", "A+=B => B,A,+=",
	"+", "", "A=A+B => B,A,+,A,=",
	"++", "", "increment, 2,A,++ == 3 (see rsi,--=[1], ... )",
	"--", "", "decrement, 2,A,-- == 1",
	"*", "=", "A*=B => B,A,*=",
	"/", "=", "A/=B => B,A,/=",
	"%", "=", "A%=B => B,A,%=",
	"&", "=", "and ax, bx => bx,ax,&=",
	"|", "", "or r0, r1, r2 => r2,r1,|,r0,=",
	"!", "=", "negate all bits",
	"^", "=", "xor ax, bx => bx,ax,^=",
	"", "[]", "mov eax,[eax] => eax,[],eax,=",
	"=", "[]", "mov [eax+3], 1 => 1,3,eax,+,=[]",
	"=", "[1]", "mov byte[eax],1 => 1,eax,=[1]",
	"=", "[8]", "mov [rax],1 => 1,rax,=[8]",
	"[]", "", "peek from random position",
	"[*]", "", "peek some from random position",
	"=", "[*]", "poke some at random position",
	"$", "", "int 0x80 => 0x80,$",
	"$$", "", "simulate a hardware trap",
	"==", "", "pops twice, compare and update esil flags",
	"<", "", "compare for smaller",
	"<", "=", "compare for smaller or equal",
	">", "", "compare for bigger",
	">", "=", "compare bigger for or equal",
	">>", "=", "shr ax, bx => bx,ax,>>= # shift right",
	"<<", "=", "shl ax, bx => bx,ax,<<= # shift left",
	">>>", "=", "ror ax, bx => bx,ax,>>>= # rotate right",
	"<<<", "=", "rol ax, bx => bx,ax,<<<= # rotate left",
	"?{", "", "if popped value != 0 run the block until }",
	"POP", "", "drops last element in the esil stack",
	"DUP", "", "duplicate last value in stack",
	"NUM", "", "evaluate last item in stack to number",
	"PICK", "", "pick Nth element in stack",
	"RPICK", "", "pick Nth element in reversed stack",
	"SWAP", "", "swap last two values in stack",
	"TRAP", "", "stop execution",
	"BITS", "", "16,BITS # change bits, useful for arm/thumb",
	"TODO", "", "the instruction is not yet esilized",
	"STACK", "", "show contents of stack",
	"CLEAR", "", "clears the esil stack",
	"REPEAT", "", "repeat n times",
	"BREAK", "", "terminates the string parsing",
	"GOTO", "", "jump to the Nth word popped from the stack",
	NULL };

static const char *help_msg_aea[] = {
	"Examples:", "aea", " show regs used in a range",
	"aea", " [ops]", "Show regs used in N instructions (all,read,{no,}written,memreads,memwrites)",
	"aea*", " [ops]", "Create mem.* flags for memory accesses",
	"aeaf", "", "Show regs used in current function",
	"aear", " [ops]", "Show regs read in N instructions",
	"aeaw", " [ops]", "Show regs written in N instructions",
	"aean", " [ops]", "Show regs not written in N instructions",
	"aeaj", " [ops]", "Show aea output in JSON format",
	"aeA", " [len]", "Show regs used in N bytes (subcommands are the same)",
	NULL };

static const char *help_msg_aec[] = {
	"Examples:", "aec", " continue until ^c",
	"aec", "", "Continue until exception",
	"aecs", "", "Continue until syscall",
	"aecu", "[addr]", "Continue until address",
	"aecue", "[addr]", "Continue until esil expression",
	NULL };

static const char *help_msg_aep[] = {
	"Usage:", "aep[-c] ", " [...]",
	"aepc", " [addr]", "change program counter for esil",
	"aep", "-[addr]", "remove pin",
	"aep", " [name] @ [addr]", "set pin",
	"aep", "", "list pins",
	NULL };

static const char *help_msg_aets[] = {
	"Usage:", "aets ", " [...]",
	"aets", "", "List all ESIL trace sessions",
	"aets+", "", "Add ESIL trace session",
	NULL };

static const char *help_msg_af[] = {
	"Usage:", "af", "",
	"af", " ([name]) ([addr])", "analyze functions (start at addr or $$)",
	"afr", " ([name]) ([addr])", "analyze functions recursively",
	"af+", " addr name [type] [diff]", "hand craft a function (requires afb+)",
	"af-", " [addr]", "clean all function analysis data (or function at addr)",
	"afb+", " fcnA bbA sz [j] [f] ([t]( [d]))", "add bb to function @ fcnaddr",
	"afb", "[?] [addr]", "List basic blocks of given function",
	"afB", " 16", "set current function as thumb (change asm.bits)",
	"afC[lc]", " ([addr])@[addr]", "calculate the Cycles (afC) or Cyclomatic Complexity (afCc)",
	"afc", "[?] type @[addr]", "set calling convention for function",
	"afd", "[addr]", "show function + delta for given offset",
	"aft", "[?]", "type matching, type propagation",
	"aff", "", "re-adjust function boundaries to fit",
	"afF", "[1|0|]", "fold/unfold/toggle",
	"afi", " [addr|fcn.name]", "show function(s) information (verbose afl)",
	"afl", "[?] [l*] [fcn name]", "list functions (addr, size, bbs, name) (see afll)",
	"afo", " [fcn.name]", "show address for the function named like this",
	"afm", " name", "merge two functions",
	"afM", " name", "print functions map",
	"afn", "[?] name [addr]", "rename name for function at address (change flag too)",
	"afna", "", "suggest automatic name for current offset",
	"afs", " [addr] [fcnsign]", "get/set function signature at current address",
	"afS", "[stack_size]", "set stack frame size for function at current address",
	"afu", " [addr]", "resize and analyze function from current address until addr",
	"afv[bsra]", "?", "manipulate args, registers and variables in function",
	"afx", "[cCd-] src dst", "add/remove code/Call/data/string reference",
	NULL };

static const char *help_msg_afb[] = {
	"Usage:", "afb", " List basic blocks of given function",
	".afbr-", "", "Set breakpoint on every return address of the function",
	".afbr-*", "", "Remove breakpoint on every return address of the function",
	"afb", " [addr]", "list basic blocks of function",
	"afb.", " [addr]", "show info of current basic block",
	"afb+", " fcn_at bbat bbsz [jump] [fail] ([type] ([diff]))", "add basic block by hand",
	"afbr", "", "Show addresses of instructions which leave the function",
	"afbi", "", "print current basic block information",
	"afbj", "", "show basic blocks information in json",
	"afbe", " bbfrom bbto", "add basic-block edge for switch-cases",
	"afB", " [bits]", "define asm.bits for the given function",
	NULL };

static const char *help_msg_afc[] = {
	"Usage:", "afc[agl?]", "",
	"afc", " convention", "Manually set calling convention for current function",
	"afc", "", "Show Calling convention for the Current function",
	"afcr", "[j]", "Show register usage for the current function",
	"afca", "", "Analyse function for finding the current calling convention",
	"afcl", "", "List all available calling conventions",
	"afco", " path", "Open Calling Convention sdb profile from given path",
	NULL };

static const char *help_msg_afC[] = {
	"Usage:", "afC", " [addr]",
	"afC", "", "function cycles cost",
	"afCc", "", "cyclomatic complexity",
	"afCl", "", "loop count (backward jumps)",
	NULL };

static const char *help_msg_afi[] = {
	"Usage:", "afi[jl*]", " <addr>",
	"afi", "", "show information of the function",
	"afi.", "", "show function name in current offset",
	"afi*", "", "function, variables and arguments",
	"afij", "", "function info in json format",
	"afil", "", "verbose function info",
	NULL };

static const char *help_msg_afl[] = {
	"Usage:", "afl", " List all functions",
	"afl", "", "list functions",
	"aflc", "", "count of functions",
	"aflj", "", "list functions in json",
	"afll", "", "list functions in verbose mode",
	"afllj", "", "list functions in verbose mode (alias to aflj)",
	"aflq", "", "list functions in quiet mode",
	"aflqj", "", "list functions in json quiet mode",
	"afls", "", "print sum of sizes of all functions",
	NULL };

/* Column legend for the verbose function listing (afll). */
static const char *help_msg_afll[] = {
	"Usage:", "", " List functions in verbose mode",
	"", "", "",
	"Table fields:", "", "",
	"", "", "",
	"address", "", "start address",
	"size", "", "function size (realsize)",
	"nbbs", "", "number of basic blocks",
	"edges", "", "number of edges between basic blocks",
	"cc", "", "cyclomatic complexity ( cc = edges - blocks + 2 * exit_blocks)",
	"cost", "", "cyclomatic cost",
	"min bound", "", "minimal address",
	"range", "", "function size",
	"max bound", "", "maximal address",
	"calls", "", "number of caller functions",
	"locals", "", "number of local variables",
	"args", "", "number of function arguments",
	"xref", "", "number of cross references",
	"frame", "", "function stack size",
	"name", "", "function name",
	NULL };

static const char *help_msg_afn[] = {
	"Usage:", "afn[sa]", " Analyze function names",
	"afn", " [name]", "rename the function",
	"afna", "", "construct a function name for the current offset",
	"afns", "", "list all strings associated with the current function",
	NULL };

static const char *help_msg_aft[] = {
	"Usage:", "aftm", "",
	"afta", "", "Setup memory and analyse do type matching analysis for all functions",
	"aftm", "", "type matching analysis",
	NULL };

static const char *help_msg_afv[] = {
	"Usage:", "afv", "[rbs]",
	"afvr", "[?]", "manipulate register based arguments",
	"afvb", "[?]", "manipulate bp based arguments/locals",
	"afvs", "[?]", "manipulate sp based arguments/locals",
	"afvR", " [varname]", "list addresses where vars are accessed",
	"afvW", " [varname]", "list addresses where vars are accessed",
	"afva", "", "analyze function arguments/locals",
	"afvd", " name", "output r2 command for displaying the value of args/locals in the debugger",
	"afvn", " [old_name] [new_name]", "rename argument/local",
	"afvt", " [name] [new_type]", "change type for given argument/local",
	"afv-", "([name])", "remove all or given var",
	NULL };

static const char *help_msg_afvb[] = {
	"Usage:", "afvb", " [idx] [name] ([type])",
	"afvb", "", "list base pointer based arguments, locals",
	"afvb*", "", "same as afvb but in r2 commands",
	"afvb", " [idx] [name] ([type])", "define base pointer based arguments, locals",
	"afvbj", "", "return list of base pointer based arguments, locals in JSON format",
	"afvb-", " [name]", "delete argument/locals at the given name",
	"afvbg", " [idx] [addr]", "define var get reference",
	"afvbs", " [idx] [addr]", "define var set reference",
	NULL };

static const char *help_msg_afvr[] = {
	"Usage:", "afvr", " [reg] [type] [name]",
	"afvr", "", "list register based arguments",
	"afvr*", "", "same as afvr but in r2 commands",
	"afvr", " [reg] [name] ([type])", "define register arguments",
	"afvrj", "", "return list of register arguments in JSON format",
	"afvr-", " [name]", "delete register arguments at the given index",
	"afvrg", " [reg] [addr]", "define argument get reference",
	"afvrs", " [reg] [addr]", "define argument set reference",
	NULL };

static const char *help_msg_afvs[] = {
	"Usage:", "afvs", " [idx] [type] [name]",
	"afvs", "", "list stack based arguments and locals",
	"afvs*", "", "same as afvs but in r2 commands",
	"afvs", " [idx] [name] [type]", "define stack based arguments,locals",
	"afvsj", "", "return list of stack based arguments and locals in JSON format",
	"afvs-", " [name]", "delete stack based argument or locals with the given name",
	"afvsg", " [idx] [addr]", "define var get reference",
	"afvss", " [idx] [addr]", "define var set reference",
	NULL };

static const char *help_msg_afx[] = {
	"Usage:", "afx[-cCd?] [src] [dst]", " manage function references (see also ar?)",
	"afxc", " sym.main+0x38 sym.printf", "add code ref",
	"afxC", " sym.main sym.puts", "add call ref",
	"afxd", " sym.main str.helloworld", "add data ref",
	"afx-", " sym.main str.helloworld", "remove reference",
	NULL };

static const char *help_msg_ag[] = {
	"Usage:", "ag[?f]", " Graphviz/graph code",
	"ag", " [addr]", "output graphviz code (bb at addr and children)",
	"ag-", "", "Reset the current ASCII art graph (see agn, age, agg?)",
	"aga", " [addr]", "idem, but only addresses",
	"agr", "[j] [addr]", "output graphviz call graph of function",
	"agg", "", "display current graph created with agn and age (see also ag-)",
	"agc", "[*j] [addr]", "output graphviz call graph of function",
	"agC", "[j]", "Same as agc -1. full program callgraph",
	"agd", " [fcn name]", "output graphviz code of diffed function",
	"age", "[?] title1 title2", "Add an edge to the current graph",
	"agf", " [addr]", "Show ASCII art graph of given function",
	"agg", "[?] [kdi*]", "Print graph in ASCII-Art, graphviz, k=v, r2 or visual",
	"agj", " [addr]", "idem, but in JSON format",
	"agJ", " [addr]", "idem, but in JSON format with formatted disassembly (like pdJ)",
	"agk", " [addr]", "idem, but in SDB key-value format",
	"agl", " [fcn name]", "output graphviz code using meta-data",
	"agn", "[?] title body", "Add a node to the current graph",
	"ags", " [addr]", "output simple graphviz call graph of function (only bb offset)",
	"agt", " [addr]", "find paths from current offset to given address",
	"agv", "", "Show function graph in web/png (see graph.web and cmd.graph) or agf for asciiart",
	NULL };

static const char *help_msg_age[] = {
	"Usage:", "age [title1] [title2]", "",
	"Examples:", "", "",
	"age", " title1 title2", "Add an edge from the node with \"title1\" as title to the one with title \"title2\"",
	"age", " \"title1 with spaces\" title2", "Add an edge from node \"title1 with spaces\" to node \"title2\"",
	"age-", " title1 title2", "Remove an edge from the node with \"title1\" as title to the one with title \"title2\"",
	"age?", "", "Show this help",
	NULL };

static const char *help_msg_agg[] = {
	"Usage:", "agg[kid?*]", "print graph",
	"agg", "", "show current graph in ascii art",
	"aggk", "", "show graph in key=value form",
	"aggi", "", "enter interactive mode for the current graph",
	"aggd", "", "print the current graph in GRAPHVIZ dot format",
	"aggv", "", "run graphviz + viewer (see 'e cmd.graph')",
	"agg*", "", "in r2 commands, to save in projects, etc",
	NULL };

static const char *help_msg_agn[] = {
	"Usage:", "agn [title] [body]", "",
	"Examples:", "", "",
	"agn", " title1 body1", "Add a node with title \"title1\" and body \"body1\"",
	"agn", " \"title with space\" \"body with space\"", "Add a node with spaces in the title and in the body",
	"agn", " title1 base64:Ym9keTE=", "Add a node with the body specified as base64",
	"agn-", " title1", "Remove a node with title \"title1\"",
	"agn?", "", "Show this help",
	NULL };

static const char *help_msg_ah[] = {
	"Usage:", "ah[lba-]", "Analysis Hints",
	"ah?", "", "show this help",
	"ah?", " offset", "show hint of given offset",
	"ah", "", "list hints in human-readable format",
	"ah.", "", "list hints in human-readable format from current offset",
	"ah-", "", "remove all hints",
	"ah-", " offset [size]", "remove hints at given offset",
	"ah*", " offset", "list hints in radare commands format",
	"aha", " ppc 51", "set arch for a range of N bytes",
	"ahb", " 16 @ $$", "force 16bit for current instruction",
	"ahc", " 0x804804", "override call/jump address",
	"ahe", " 3,eax,+=", "set vm analysis string",
	"ahf", " 0x804840", "override fallback address for call",
	"ahh", " 0x804840", "highlight this adrress offset in disasm",
	"ahi", "[?] 10", "define numeric base for immediates (1, 8, 10, 16, s)",
	"ahj", "", "list hints in JSON",
	"aho", " foo a0,33", "replace opcode string",
	"ahp", " addr", "set pointer hint",
	"ahs", " 4", "set opcode size=4",
	"ahS", " jz", "set asm.syntax=jz for this opcode",
	NULL };

static const char *help_msg_ahi[] = {
	"Usage", "ahi [sbodh] [@ offset]", " Define numeric base",
	"ahi", " [base]", "set numeric base (1, 2, 8, 10, 16)",
	"ahi", " b", "set base to binary (2)",
	"ahi", " d", "set base to decimal (10)",
	"ahi", " h", "set base to hexadecimal (16)",
	"ahi", " o", "set base to octal (8)",
	"ahi", " p", "set base to htons(port) (3)",
	"ahi", " i", "set base to IP address (32)",
	"ahi", " S", "set base to syscall (80)",
	"ahi", " s", "set base to string (1)",
	NULL };

static const char *help_msg_ao[] = {
	"Usage:", "ao[e?] [len]", "Analyze Opcodes",
	"aoj", " N", "display opcode analysis information in JSON for N opcodes",
	"aoe", " N", "display esil form for N opcodes",
	"aor", " N", "display reil form for N opcodes",
	"aos", " N", "display size of N opcodes",
	"ao", " 5", "display opcode analysis of 5 opcodes",
	"ao*", "", "display opcode in r commands",
	NULL };

static const char *help_msg_ar[] = {
	"Usage: ar", "", "# Analysis Registers",
	"ar", "", "Show 'gpr' registers",
	"ar0", "", "Reset register arenas to 0",
	"ara", "[?]", "Manage register arenas",
	"ar", " 16", "Show 16 bit registers",
	"ar", " 32", "Show 32 bit registers",
	"ar", " all", "Show all bit registers",
	"ar", " <type>", "Show all registers of given type",
	"arC", "", "Display register profile comments",
	"arr", "", "Show register references (telescoping)",
	"ar=", "([size])(:[regs])", "Show register values in columns",
	"ar?", " <reg>", "Show register value",
	"arb", " <type>", "Display hexdump of the given arena",
	"arc", " <name>", "Conditional flag registers",
	"ard", " <name>", "Show only different registers",
	"arn", " <regalias>", "Get regname for pc,sp,bp,a0-3,zf,cf,of,sg",
	"aro", "", "Show old (previous) register values",
	"arp", "[?] <file>", "Load register profile from file",
	"ars", "", "Stack register state",
	"art", "", "List all register types",
	"arw", " <hexnum>", "Set contents of the register arena",
	".ar*", "", "Import register values as flags",
	".ar-", "", "Unflag all registers",
	NULL };

static const char *help_msg_ara[] = {
	"Usage:", "ara[+-s]", "Register Arena Push/Pop/Swap",
	"ara", "", "show all register arenas allocated",
	"ara", "+", "push a new register arena for each type",
	"ara", "-", "pop last register arena",
	"aras", "", "swap last two register arenas",
	NULL };

static const char *help_msg_arw[] = {
	"Usage:", "arw ", "# Set contents of the register arena",
	"arw", " <hexnum>", "Set contents of the register arena",
	NULL };

static const char *help_msg_as[] = {
	"Usage: as[ljk?]", "", "syscall name <-> number utility",
	"as", "", "show current syscall and arguments",
	"as", " 4", "show syscall 4 based on asm.os and current regs/mem",
	"asc[a]", " 4", "dump syscall info in .asm or .h",
	"asf", " [k[=[v]]]", "list/set/unset pf function signatures (see fcnsign)",
	"asj", "", "list of syscalls in JSON",
	"asl", "", "list of syscalls by asm.os and asm.arch",
	"asl", " close", "returns the syscall number for close",
	"asl", " 4", "returns the name of the syscall number 4",
	"ask", " [query]", "perform syscall/ queries",
	NULL };

static const char *help_msg_av[] = {
	"Usage:", "av[?jr*]", " C++ vtables and RTTI",
	"av", "", "search for vtables in data sections and show results",
	"avj", "", "like av, but as json",
	"av*", "", "like av, but as r2 commands",
	"avr", "[j@addr]", "try to parse RTTI at vtable addr (see anal.cpp.abi)",
	"avra", "[j]", "search for vtables and try to parse RTTI at each of them",
	NULL };

static const char *help_msg_ax[] = {
	"Usage:", "ax[?d-l*]", " # see also 'afx?'",
	"ax", "", "list refs",
	"ax", " addr [at]", "add code ref pointing to addr (from curseek)",
	"ax-", " [at]", "clean all refs (or refs from addr)",
	"axc", " addr [at]", "add code jmp ref // unused?",
	"axC", " addr [at]", "add code call ref",
	"axg", " [addr]", "show xrefs graph to reach current function",
	"axgj", " [addr]", "show xrefs graph to reach current function in json format",
	"axd", " addr [at]", "add data ref",
	"axq", "", "list refs in quiet/human-readable format",
	"axj", "", "list refs in json format",
	"axF", " [flg-glob]", "find data/code references of flags",
	"axt", " [addr]", "find data/code references to this address",
	"axf", " [addr]", "find data/code references from this address",
	"axk", " [query]", "perform sdb query",
	"ax*", "", "output radare commands",
	NULL };

/* Register every help table above as a command descriptor on the core. */
static void cmd_anal_init(RCore *core) {
	DEFINE_CMD_DESCRIPTOR (core, a);
	DEFINE_CMD_DESCRIPTOR (core, aa);
	DEFINE_CMD_DESCRIPTOR (core, aar);
	DEFINE_CMD_DESCRIPTOR (core, ab);
	DEFINE_CMD_DESCRIPTOR (core, ad);
	DEFINE_CMD_DESCRIPTOR (core, ae);
	DEFINE_CMD_DESCRIPTOR (core, aea);
	DEFINE_CMD_DESCRIPTOR (core, aec);
	DEFINE_CMD_DESCRIPTOR (core, aep);
	DEFINE_CMD_DESCRIPTOR (core, af);
	DEFINE_CMD_DESCRIPTOR (core, afb);
	DEFINE_CMD_DESCRIPTOR (core, afc);
	DEFINE_CMD_DESCRIPTOR (core, afC);
	DEFINE_CMD_DESCRIPTOR (core, afi);
	DEFINE_CMD_DESCRIPTOR (core, afl);
	DEFINE_CMD_DESCRIPTOR (core, afll);
	DEFINE_CMD_DESCRIPTOR (core, afn);
	DEFINE_CMD_DESCRIPTOR (core, aft);
	DEFINE_CMD_DESCRIPTOR (core, afv);
	DEFINE_CMD_DESCRIPTOR (core, afvb);
	DEFINE_CMD_DESCRIPTOR (core, afvr);
	DEFINE_CMD_DESCRIPTOR (core, afvs);
	DEFINE_CMD_DESCRIPTOR (core, afx);
	DEFINE_CMD_DESCRIPTOR (core, ag);
	DEFINE_CMD_DESCRIPTOR (core, age);
	DEFINE_CMD_DESCRIPTOR (core, agg);
	DEFINE_CMD_DESCRIPTOR (core, agn);
	DEFINE_CMD_DESCRIPTOR (core, ah);
	DEFINE_CMD_DESCRIPTOR (core, ahi);
	DEFINE_CMD_DESCRIPTOR (core, ao);
	DEFINE_CMD_DESCRIPTOR (core, ar);
	DEFINE_CMD_DESCRIPTOR (core, ara);
	DEFINE_CMD_DESCRIPTOR (core, arw);
	DEFINE_CMD_DESCRIPTOR (core, as);
	DEFINE_CMD_DESCRIPTOR (core, ax);
}

/* better aac for windows-x86-32 */
#define JAYRO_03	0

#if JAYRO_03

/*
 * Heuristic check whether the bytes at buf[bufi] form an x86 call whose
 * target lies in [from, to].  Dead code: JAYRO_03 is 0, so this never
 * compiles.
 *
 * NOTE(review): the second range test returns false in BOTH arms — the
 * taken branch almost certainly meant "return true"; as written the
 * relative-call case can never match.  Left untouched since the block is
 * disabled.
 */
static bool anal_is_bad_call(RCore *core, ut64 from, ut64 to, ut64 addr, ut8 *buf, int bufi) {
	ut64 align = R_ABS (addr % PE_ALIGN);
	ut32 call_bytes;

	// XXX this is x86 specific
	if (align == 0) {
		call_bytes = (ut32)((ut8*)buf)[bufi + 3] << 24;
		call_bytes |= (ut32)((ut8*)buf)[bufi + 2] << 16;
		call_bytes |= (ut32)((ut8*)buf)[bufi + 1] << 8;
		call_bytes |= (ut32)((ut8*)buf)[bufi];
	} else {
		call_bytes = (ut32)((ut8*)buf)[bufi - align + 3] << 24;
		call_bytes |= (ut32)((ut8*)buf)[bufi - align + 2] << 16;
		call_bytes |= (ut32)((ut8*)buf)[bufi - align + 1] << 8;
		call_bytes |= (ut32)((ut8*)buf)[bufi - align];
	}
	if (call_bytes >= from && call_bytes <= to) {
		return true;
	}
	call_bytes = (ut32)((ut8*)buf)[bufi + 4] << 24;
	call_bytes |= (ut32)((ut8*)buf)[bufi + 3] << 16;
	call_bytes |= (ut32)((ut8*)buf)[bufi + 2] << 8;
	call_bytes |= (ut32)((ut8*)buf)[bufi + 1];
	call_bytes += addr + 5;
	if (call_bytes >= from && call_bytes <= to) {
		return false;
	}
	return false;
}
#endif

/*
 * 'aft' (type matching) command dispatcher.
 * NOTE(review): this definition runs past the end of the visible chunk
 * and is TRUNCATED mid-statement below; it is reproduced verbatim and
 * must not be treated as complete.
 */
static void type_cmd(RCore *core, const char *input) {
	RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1);
	if (!fcn && *input != '?' && *input != 'a') {
		eprintf ("cant find function here\n");
		return;
	}
	RListIter *it;
	ut64 seek;
	bool io_cache = r_config_get_i (core->config, "io.cache");
	r_cons_break_push (NULL, NULL);
	switch (*input) {
	case 'a': // "afta"
		if (r_config_get_i (core->config, "cfg.debug")) {
			eprintf ("TOFIX: afta can't run in debugger mode.\n");
			break;
		}
		seek = core->offset;
		r_core_cmd0 (core, "aei");
		r_core_cmd0 (core, "aeim");
		r_reg_arena_push (core->anal->reg);
		r_list_foreach (core->anal->fcns, it, fcn) {
			int ret = r_core_seek (core, fcn->addr, true);
			if (!ret) {
				continue;
			}
			r_anal_esil_set_pc (core->anal->esil, fcn->addr);
			r_core_anal_type_match (core, fcn);
			if (r_cons_is_breaked ()) {
				break;
			}
		}
		r_core_cmd0 (core, "aeim-");
		r_core_cmd0 (core, "aei-");
		r_core_seek (core, seek, true);
		r_reg_arena_pop (core->anal->reg);
		break;
	case 'm': // "aftm"
		r_config_set_i (core->config, "io.cache", true);
		seek = core->offset;
		r_anal_esil_set_pc (core->anal->esil, fcn?
fcn->addr: core->offset); r_core_anal_type_match (core, fcn); r_core_seek (core, seek, true); r_config_set_i (core->config, "io.cache", io_cache); break; case '?': r_core_cmd_help (core, help_msg_aft); break; } r_cons_break_pop (); } static int cc_print(void *p, const char *k, const char *v) { if (!strcmp (v, "cc")) { r_cons_println (k); } return 1; } static void find_refs(RCore *core, const char *glob) { char cmd[128]; ut64 curseek = core->offset; while (*glob == ' ') glob++; if (!*glob) { glob = "str."; } if (*glob == '?') { eprintf ("Usage: arf [flag-str-filter]\n"); return; } eprintf ("Finding references of flags matching '%s'...\n", glob); snprintf (cmd, sizeof (cmd) - 1, ".(findstref) @@= `f~%s[0]`", glob); r_core_cmd0 (core, "(findstref,f here=$$,s entry0,/r here,f-here)"); r_core_cmd0 (core, cmd); r_core_cmd0 (core, "(-findstref)"); r_core_seek (core, curseek, 1); } /* set flags for every function */ static void flag_every_function(RCore *core) { RListIter *iter; RAnalFunction *fcn; r_flag_space_push (core->flags, "functions"); r_list_foreach (core->anal->fcns, iter, fcn) { r_flag_set (core->flags, fcn->name, fcn->addr, r_anal_fcn_size (fcn)); } r_flag_space_pop (core->flags); } static void var_help(RCore *core, char ch) { switch (ch) { case 'b': r_core_cmd_help (core, help_msg_afvb); break; case 's': r_core_cmd_help (core, help_msg_afvs); break; case 'r': r_core_cmd_help (core, help_msg_afvr); break; case '?': r_core_cmd_help (core, help_msg_afv); break; default: eprintf ("See afv?, afvb?, afvr? 
and afvs?\n"); } } static void var_accesses_list(RAnal *a, RAnalFunction *fcn, int delta, const char *typestr) { const char *var_local = sdb_fmt ("var.0x%"PFMT64x".%d.%d.%s", fcn->addr, 1, delta, typestr); const char *xss = sdb_const_get (a->sdb_fcns, var_local, 0); if (xss && *xss) { r_cons_printf ("%s\n", xss); } else { r_cons_newline (); } } static void list_vars(RCore *core, RAnalFunction *fcn, int type, const char *name) { RAnalVar *var; RListIter *iter; RList *list = r_anal_var_list (core->anal, fcn, 0); if (type == '*') { const char *bp = r_reg_get_name (core->anal->reg, R_REG_NAME_BP); r_cons_printf ("f-fcnvar*\n"); r_list_foreach (list, iter, var) { r_cons_printf ("f fcnvar.%s @ %s%s%d\n", var->name, bp, var->delta>=0? "+":"", var->delta); } return; } if (type != 'W' && type != 'R') { return; } const char *typestr = type == 'R'?"reads":"writes"; r_list_foreach (list, iter, var) { r_cons_printf ("%10s ", var->name); var_accesses_list (core->anal, fcn, var->delta, typestr); } } static int cmd_an(RCore *core, bool use_json, const char *name) { ut64 off = core->offset; RAnalOp op; char *q = NULL; ut64 tgt_addr = UT64_MAX; if (use_json) { r_cons_print ("["); } r_anal_op (core->anal, &op, off, core->block + off - core->offset, 32, R_ANAL_OP_MASK_ALL); tgt_addr = op.jump != UT64_MAX ? 
op.jump : op.ptr; if (op.var) { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, off, 0); if (fcn) { RAnalVar *bar = r_anal_var_get_byname (core->anal, fcn, op.var->name); if (!bar) { bar = r_anal_var_get_byname (core->anal, fcn, op.var->name); if (!bar) { bar = r_anal_var_get_byname (core->anal, fcn, op.var->name); } } if (bar) { if (name) { r_anal_var_rename (core->anal, fcn->addr, bar->scope, bar->kind, bar->name, name); } else if (!use_json) { r_cons_println (bar->name); } else { r_cons_printf ("{\"type\":\"var\",\"name\":\"%s\"}", bar->name); } } else { eprintf ("Cannot find variable\n"); } } else { eprintf ("Cannot find function\n"); } } else if (tgt_addr != UT64_MAX) { RAnalFunction *fcn = r_anal_get_fcn_at (core->anal, tgt_addr, R_ANAL_FCN_TYPE_NULL); RFlagItem *f = r_flag_get_i (core->flags, tgt_addr); if (fcn) { if (name) { q = r_str_newf ("afn %s 0x%"PFMT64x, name, tgt_addr); } else if (!use_json) { r_cons_println (fcn->name); } else { r_cons_printf ("{\"type\":\"function\",\"name\":\"%s\"}", fcn->name); } } else if (f) { if (name) { q = r_str_newf ("fr %s %s", f->name, name); } else if (!use_json) { r_cons_println (f->name); } else { r_cons_printf ("{\"type\":\"flag\",\"name\":\"%s\"}", f->name); } } else { if (name) { q = r_str_newf ("f %s @ 0x%"PFMT64x, name, tgt_addr); } else if (!use_json) { r_cons_printf ("0x%" PFMT64x "\n", tgt_addr); } else { r_cons_printf ("{\"type\":\"address\",\"offset\":" "%" PFMT64d "}", tgt_addr); } } } if (use_json) { r_cons_print ("]\n"); } if (q) { r_core_cmd0 (core, q); free (q); } r_anal_op_fini (&op); return 0; } static int var_cmd(RCore *core, const char *str) { char *p, *ostr; int delta, type = *str, res = true; RAnalVar *v1; RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1); ostr = p = NULL; if (!str[0]) { // "afv" if (fcn) { r_core_cmd0 (core, "afvs"); r_core_cmd0 (core, "afvb"); r_core_cmd0 (core, "afvr"); return true; } eprintf ("Cannot find function\n"); return false; } if (str[0] == 'j') 
{ // "afvj" if (fcn) { r_cons_printf ("{\"sp\":"); r_core_cmd0 (core, "afvsj"); r_cons_printf (",\"bp\":"); r_core_cmd0 (core, "afvbj"); r_cons_printf (",\"reg\":"); r_core_cmd0 (core, "afvrj"); r_cons_printf ("}\n"); return true; } eprintf ("Cannot find function\n"); return false; } if (!str[0] || str[1] == '?'|| str[0] == '?') { var_help (core, *str); return res; } if (!fcn) { eprintf ("Cannot find function in 0x%08"PFMT64x"\n", core->offset); return false; } ostr = p = strdup (str); /* Variable access CFvs = set fun var */ switch (str[0]) { case '-': // "afv" if (fcn) { r_core_cmdf (core, "afvs-%s", str + 1); r_core_cmdf (core, "afvb-%s", str + 1); r_core_cmdf (core, "afvr-%s", str + 1); return true; } eprintf ("Cannot find function\n"); return false; case 'R': // "afvR" case 'W': // "afvW" case '*': // "afv*" list_vars (core, fcn, str[0], str + 1); break; case 'a': // "afva" r_anal_var_delete_all (core->anal, fcn->addr, R_ANAL_VAR_KIND_REG); r_anal_var_delete_all (core->anal, fcn->addr, R_ANAL_VAR_KIND_BPV); r_anal_var_delete_all (core->anal, fcn->addr, R_ANAL_VAR_KIND_SPV); fcn_callconv (core, fcn); free (p); return true; case 'n': if (str[1]) { // "afvn" char *old_name = r_str_trim_head (strchr (ostr, ' ')); if (!old_name) { free (ostr); return false; } char *new_name = strchr (old_name, ' '); if (!new_name) { free (ostr); return false; } *new_name++ = 0; r_str_trim (new_name); v1 = r_anal_var_get_byname (core->anal, fcn, old_name); if (v1) { r_anal_var_rename (core->anal, fcn->addr, R_ANAL_VAR_SCOPE_LOCAL, v1->kind, old_name, new_name); r_anal_var_free (v1); } else { eprintf ("Cant find var by name\n"); } free (ostr); } else { RListIter *iter; RAnalVar *v; RList *list = r_anal_var_list (core->anal, fcn, 0); r_list_foreach (list, iter, v) { r_cons_printf ("%s\n", v->name); } } return true; case 'd': // "afvd" if (str[1]) { p = r_str_trim (strchr (ostr, ' ')); if (!p) { free (ostr); return false; } v1 = r_anal_var_get_byname (core->anal, fcn, p); if (!v1) { 
free (ostr); return false; } r_anal_var_display (core->anal, v1->delta, v1->kind, v1->type); r_anal_var_free (v1); free (ostr); } else { RListIter *iter; RAnalVar *p; RList *list = r_anal_var_list (core->anal, fcn, 0); r_list_foreach (list, iter, p) { char *a = r_core_cmd_strf (core, ".afvd %s", p->name); if ((a && !*a) || !a) { free (a); a = strdup ("\n"); } r_cons_printf ("var %s = %s", p->name, a); free (a); } r_list_free (list); // args list = r_anal_var_list (core->anal, fcn, 1); r_list_foreach (list, iter, p) { char *a = r_core_cmd_strf (core, ".afvd %s", p->name); if ((a && !*a) || !a) { free (a); a = strdup ("\n"); } r_cons_printf ("arg %s = %s", p->name, a); free (a); } r_list_free (list); } return true; case 't':{ // "afvt" p = strchr (ostr, ' '); if (!p++) { free (ostr); return false; } char *type = strchr (p, ' '); if (!type) { free (ostr); return false; } *type++ = 0; v1 = r_anal_var_get_byname (core->anal, fcn, p); if (!v1) { eprintf ("Cant find get by name %s\n", p); free (ostr); return false; } r_anal_var_retype (core->anal, fcn->addr, R_ANAL_VAR_SCOPE_LOCAL, -1, v1->kind, type, -1, p); r_anal_var_free (v1); free (ostr); return true; } } switch (str[1]) { case '\0': case '*': case 'j': r_anal_var_list_show (core->anal, fcn, type, str[1]); break; case '.': r_anal_var_list_show (core->anal, fcn, core->offset, 0); break; case '-': // "afv[bsr]-" if (str[2] == '*') { r_anal_var_delete_all (core->anal, fcn->addr, type); } else { if (IS_DIGIT (str[2])) { r_anal_var_delete (core->anal, fcn->addr, type, 1, (int)r_num_math (core->num, str + 1)); } else { char *name = r_str_trim ( strdup (str + 2)); r_anal_var_delete_byname (core->anal, fcn, type, name); free (name); } } break; case 'd': eprintf ("This command is deprecated, use afvd instead\n"); break; case 't': eprintf ("This command is deprecated use afvt instead\n"); break; case 's': case 'g': if (str[2] != '\0') { int rw = 0; // 0 = read, 1 = write RAnalVar *var = r_anal_var_get (core->anal, fcn->addr, 
(char)type, atoi (str + 2), R_ANAL_VAR_SCOPE_LOCAL); if (!var) { eprintf ("Cannot find variable in: '%s'\n", str); res = false; break; } if (var != NULL) { int scope = (str[1] == 'g')? 0: 1; r_anal_var_access (core->anal, fcn->addr, (char)type, scope, atoi (str + 2), rw, core->offset); r_anal_var_free (var); break; } } else { eprintf ("Missing argument\n"); } break; case ' ': { const char *name; char *vartype; int size = 4; int scope = 1; for (str++; *str == ' ';) str++; p = strchr (str, ' '); if (!p) { var_help (core, type); break; } *p++ = 0; if (type == 'r') { //registers RRegItem *i = r_reg_get (core->anal->reg, str, -1); if (!i) { eprintf ("Register not found"); break; } delta = i->index; } else { delta = r_num_math (core->num, str); } name = p; vartype = strchr (name, ' '); if (vartype) { *vartype++ = 0; r_anal_var_add (core->anal, fcn->addr, scope, delta, type, vartype, size, name); } else { eprintf ("Missing name\n"); } } break; }; free (ostr); return res; } static void print_trampolines(RCore *core, ut64 a, ut64 b, size_t element_size) { int i; for (i = 0; i < core->blocksize; i += element_size) { ut32 n; memcpy (&n, core->block + i, sizeof (ut32)); if (n >= a && n <= b) { if (element_size == 4) { r_cons_printf ("f trampoline.%x @ 0x%" PFMT64x "\n", n, core->offset + i); } else { r_cons_printf ("f trampoline.%" PFMT64x " @ 0x%" PFMT64x "\n", n, core->offset + i); } r_cons_printf ("Cd %u @ 0x%" PFMT64x ":%u\n", element_size, core->offset + i, element_size); // TODO: add data xrefs } } } static void cmd_anal_trampoline(RCore *core, const char *input) { int bits = r_config_get_i (core->config, "asm.bits"); char *p, *inp = strdup (input); p = strchr (inp, ' '); if (p) { *p = 0; } ut64 a = r_num_math (core->num, inp); ut64 b = p? 
r_num_math (core->num, p + 1): 0; free (inp); switch (bits) { case 32: print_trampolines (core, a, b, 4); break; case 64: print_trampolines (core, a, b, 8); break; } } R_API char *cmd_syscall_dostr(RCore *core, int n) { char *res = NULL; int i; char str[64]; if (n == -1) { n = (int)r_debug_reg_get (core->dbg, "oeax"); if (!n || n == -1) { const char *a0 = r_reg_get_name (core->anal->reg, R_REG_NAME_SN); n = (int)r_debug_reg_get (core->dbg, a0); } } RSyscallItem *item = r_syscall_get (core->anal->syscall, n, -1); if (!item) { res = r_str_appendf (res, "%d = unknown ()", n); return res; } res = r_str_appendf (res, "%d = %s (", item->num, item->name); // TODO: move this to r_syscall //TODO replace the hardcoded CC with the sdb ones for (i = 0; i < item->args; i++) { // XXX this is a hack to make syscall args work on x86-32 and x86-64 // we need to shift sn first.. which is bad, but needs to be redesigned int regidx = i; if (core->assembler->bits == 32) { regidx++; } ut64 arg = r_debug_arg_get (core->dbg, R_ANAL_CC_TYPE_FASTCALL, regidx); //r_cons_printf ("(%d:0x%"PFMT64x")\n", i, arg); if (item->sargs) { switch (item->sargs[i]) { case 'p': // pointer res = r_str_appendf (res, "0x%08" PFMT64x "", arg); break; case 'i': res = r_str_appendf (res, "%" PFMT64d "", arg); break; case 'z': memset (str, 0, sizeof (str)); r_io_read_at (core->io, arg, (ut8 *)str, sizeof (str) - 1); r_str_filter (str, strlen (str)); res = r_str_appendf (res, "\"%s\"", str); break; case 'Z': { //TODO replace the hardcoded CC with the sdb ones ut64 len = r_debug_arg_get (core->dbg, R_ANAL_CC_TYPE_FASTCALL, i + 2); len = R_MIN (len + 1, sizeof (str) - 1); if (len == 0) { len = 16; // override default } (void)r_io_read_at (core->io, arg, (ut8 *)str, len); str[len] = 0; r_str_filter (str, -1); res = r_str_appendf (res, "\"%s\"", str); } break; default: res = r_str_appendf (res, "0x%08" PFMT64x "", arg); break; } } else { res = r_str_appendf (res, "0x%08" PFMT64x "", arg); } if (i + 1 < item->args) { 
res = r_str_appendf (res, ", "); } } r_syscall_item_free (item); res = r_str_appendf (res, ")"); return res; } static void cmd_syscall_do(RCore *core, int n) { char *msg = cmd_syscall_dostr (core, n); if (msg) { r_cons_println (msg); free (msg); } } static void core_anal_bytes(RCore *core, const ut8 *buf, int len, int nops, int fmt) { int stacksize = r_config_get_i (core->config, "esil.stack.depth"); bool iotrap = r_config_get_i (core->config, "esil.iotrap"); bool romem = r_config_get_i (core->config, "esil.romem"); bool stats = r_config_get_i (core->config, "esil.stats"); bool be = core->print->big_endian; bool use_color = core->print->flags & R_PRINT_FLAGS_COLOR; core->parser->relsub = r_config_get_i (core->config, "asm.relsub"); int ret, i, j, idx, size; const char *color = ""; const char *esilstr; const char *opexstr; RAnalHint *hint; RAnalEsil *esil = NULL; RAsmOp asmop; RAnalOp op; ut64 addr; bool isFirst = true; unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size"); int totalsize = 0; // Variables required for setting up ESIL to REIL conversion if (use_color) { color = core->cons->pal.label; } switch (fmt) { case 'j': r_cons_printf ("["); break; case 'r': // Setup for ESIL to REIL conversion esil = r_anal_esil_new (stacksize, iotrap, addrsize); if (!esil) { return; } r_anal_esil_to_reil_setup (esil, core->anal, romem, stats); r_anal_esil_set_pc (esil, core->offset); break; } for (i = idx = ret = 0; idx < len && (!nops || (nops && i < nops)); i++, idx += ret) { addr = core->offset + idx; // TODO: use more anal hints hint = r_anal_hint_get (core->anal, addr); r_asm_set_pc (core->assembler, addr); ret = r_asm_disassemble (core->assembler, &asmop, buf + idx, len - idx); ret = r_anal_op (core->anal, &op, core->offset + idx, buf + idx, len - idx, R_ANAL_OP_MASK_ALL); esilstr = R_STRBUF_SAFEGET (&op.esil); opexstr = R_STRBUF_SAFEGET (&op.opex); char *mnem = strdup (asmop.buf_asm); char *sp = strchr (mnem, ' '); if (sp) { *sp = 0; } if (ret < 1 && 
fmt != 'd') { eprintf ("Oops at 0x%08" PFMT64x " (", core->offset + idx); for (i = idx, j = 0; i < core->blocksize && j < 3; ++i, ++j) { eprintf ("%02x ", buf[i]); } eprintf ("...)\n"); break; } size = (hint && hint->size)? hint->size: op.size; if (fmt == 'd') { char *opname = strdup (asmop.buf_asm); r_str_split (opname, ' '); char *d = r_asm_describe (core->assembler, opname); if (d && *d) { r_cons_printf ("%s: %s\n", opname, d); free (d); } else { eprintf ("Unknown opcode\n"); } free (opname); } else if (fmt == 'e') { if (*esilstr) { if (use_color) { r_cons_printf ("%s0x%" PFMT64x Color_RESET " %s\n", color, core->offset + idx, esilstr); } else { r_cons_printf ("0x%" PFMT64x " %s\n", core->offset + idx, esilstr); } } } else if (fmt == 's') { totalsize += op.size; } else if (fmt == 'r') { if (*esilstr) { if (use_color) { r_cons_printf ("%s0x%" PFMT64x Color_RESET "\n", color, core->offset + idx); } else { r_cons_printf ("0x%" PFMT64x "\n", core->offset + idx); } r_anal_esil_parse (esil, esilstr); r_anal_esil_dumpstack (esil); r_anal_esil_stack_free (esil); } } else if (fmt == 'j') { if (isFirst) { isFirst = false; } else { r_cons_print (","); } r_cons_printf ("{\"opcode\":\"%s\",", asmop.buf_asm); { char strsub[128] = { 0 }; // pc+33 r_parse_varsub (core->parser, NULL, core->offset + idx, asmop.size, asmop.buf_asm, strsub, sizeof (strsub)); { ut64 killme = UT64_MAX; if (r_io_read_i (core->io, op.ptr, &killme, op.refptr, be)) { core->parser->relsub_addr = killme; } } // 0x33->sym.xx char *p = strdup (strsub); r_parse_filter (core->parser, core->flags, p, strsub, sizeof (strsub), be); free (p); r_cons_printf ("\"disasm\":\"%s\",", strsub); } r_cons_printf ("\"mnemonic\":\"%s\",", mnem); if (hint && hint->opcode) { r_cons_printf ("\"ophint\":\"%s\",", hint->opcode); } r_cons_printf ("\"prefix\":%" PFMT64d ",", op.prefix); r_cons_printf ("\"id\":%d,", op.id); if (opexstr && *opexstr) { r_cons_printf ("\"opex\":%s,", opexstr); } r_cons_printf ("\"addr\":%" PFMT64d ",", 
core->offset + idx); r_cons_printf ("\"bytes\":\""); for (j = 0; j < size; j++) { r_cons_printf ("%02x", buf[j + idx]); } r_cons_printf ("\","); if (op.val != UT64_MAX) { r_cons_printf ("\"val\": %" PFMT64d ",", op.val); } if (op.ptr != UT64_MAX) { r_cons_printf ("\"ptr\": %" PFMT64d ",", op.ptr); } r_cons_printf ("\"size\": %d,", size); r_cons_printf ("\"type\": \"%s\",", r_anal_optype_to_string (op.type)); if (op.reg) { r_cons_printf ("\"reg\": \"%s\",", op.reg); } if (hint && hint->esil) { r_cons_printf ("\"esil\": \"%s\",", hint->esil); } else if (*esilstr) { r_cons_printf ("\"esil\": \"%s\",", esilstr); } if (hint && hint->jump != UT64_MAX) { op.jump = hint->jump; } if (op.jump != UT64_MAX) { r_cons_printf ("\"jump\":%" PFMT64d ",", op.jump); } if (hint && hint->fail != UT64_MAX) { op.fail = hint->fail; } if (op.refptr != -1) { r_cons_printf ("\"refptr\":%d,", op.refptr); } if (op.fail != UT64_MAX) { r_cons_printf ("\"fail\":%" PFMT64d ",", op.fail); } r_cons_printf ("\"cycles\":%d,", op.cycles); if (op.failcycles) { r_cons_printf ("\"failcycles\":%d,", op.failcycles); } r_cons_printf ("\"delay\":%d,", op.delay); { const char *p = r_anal_stackop_tostring (op.stackop); if (p && *p && strcmp (p, "null")) r_cons_printf ("\"stack\":\"%s\",", p); } if (op.stackptr) { r_cons_printf ("\"stackptr\":%d,", op.stackptr); } { const char *arg = (op.type & R_ANAL_OP_TYPE_COND) ? 
r_anal_cond_tostring (op.cond): NULL; if (arg) { r_cons_printf ("\"cond\":\"%s\",", arg); } } r_cons_printf ("\"family\":\"%s\"}", r_anal_op_family_to_string (op.family)); } else { #define printline(k, fmt, arg)\ { \ if (use_color)\ r_cons_printf ("%s%s: " Color_RESET, color, k);\ else\ r_cons_printf ("%s: ", k);\ if (fmt) r_cons_printf (fmt, arg);\ } printline ("address", "0x%" PFMT64x "\n", core->offset + idx); printline ("opcode", "%s\n", asmop.buf_asm); printline ("mnemonic", "%s\n", mnem); if (hint) { if (hint->opcode) { printline ("ophint", "%s\n", hint->opcode); } #if 0 // addr should not override core->offset + idx.. its silly if (hint->addr != UT64_MAX) { printline ("addr", "0x%08" PFMT64x "\n", (hint->addr + idx)); } #endif } printline ("prefix", "%" PFMT64d "\n", op.prefix); printline ("id", "%d\n", op.id); #if 0 // no opex here to avoid lot of tests broken..and having json in here is not much useful imho if (opexstr && *opexstr) { printline ("opex", "%s\n", opexstr); } #endif printline ("bytes", NULL, 0); for (j = 0; j < size; j++) { r_cons_printf ("%02x", buf[j + idx]); } r_cons_newline (); if (op.val != UT64_MAX) printline ("val", "0x%08" PFMT64x "\n", op.val); if (op.ptr != UT64_MAX) printline ("ptr", "0x%08" PFMT64x "\n", op.ptr); if (op.refptr != -1) printline ("refptr", "%d\n", op.refptr); printline ("size", "%d\n", size); printline ("type", "%s\n", r_anal_optype_to_string (op.type)); { const char *t2 = r_anal_optype_to_string (op.type2); if (t2 && strcmp (t2, "null")) { printline ("type2", "%s\n", t2); } } if (op.reg) { printline ("reg", "%s\n", op.reg); } if (hint && hint->esil) { printline ("esil", "%s\n", hint->esil); } else if (*esilstr) { printline ("esil", "%s\n", esilstr); } if (hint && hint->jump != UT64_MAX) { op.jump = hint->jump; } if (op.jump != UT64_MAX) { printline ("jump", "0x%08" PFMT64x "\n", op.jump); } if (op.direction != 0) { const char * dir = op.direction == 1 ? "read" : op.direction == 2 ? "write" : op.direction == 4 ? 
"exec" : op.direction == 8 ? "ref": "none"; printline ("direction", "%s\n", dir); } if (hint && hint->fail != UT64_MAX) { op.fail = hint->fail; } if (op.fail != UT64_MAX) { printline ("fail", "0x%08" PFMT64x "\n", op.fail); } if (op.delay) { printline ("delay", "%d\n", op.delay); } printline ("stack", "%s\n", r_anal_stackop_tostring (op.stackop)); { const char *arg = (op.type & R_ANAL_OP_TYPE_COND)? r_anal_cond_tostring (op.cond): NULL; if (arg) { printline ("cond", "%s\n", arg); } } printline ("family", "%s\n", r_anal_op_family_to_string (op.family)); printline ("stackop", "%s\n", r_anal_stackop_tostring (op.stackop)); if (op.stackptr) { printline ("stackptr", "%"PFMT64d"\n", op.stackptr); } } //r_cons_printf ("false: 0x%08"PFMT64x"\n", core->offset+idx); //free (hint); free (mnem); r_anal_hint_free (hint); } if (fmt == 'j') { r_cons_printf ("]"); r_cons_newline (); } else if (fmt == 's') { r_cons_printf ("%d\n", totalsize); } r_anal_esil_free (esil); } static int bb_cmp(const void *a, const void *b) { const RAnalBlock *ba = a; const RAnalBlock *bb = b; return ba->addr - bb->addr; } static int anal_fcn_list_bb(RCore *core, const char *input, bool one) { RDebugTracepoint *tp = NULL; RListIter *iter; RAnalBlock *b; int mode = 0; ut64 addr, bbaddr = UT64_MAX; bool firstItem = true; if (*input == '.') { one = true; input++; } if (*input) { mode = *input; input++; } if (*input == '.') { one = true; input++; } if (input && *input) { addr = bbaddr = r_num_math (core->num, input); } else { addr = core->offset; } if (one) { bbaddr = addr; } RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, 0); if (!fcn) { return false; } switch (mode) { case 'j': r_cons_printf ("["); break; case '*': r_cons_printf ("fs blocks\n"); break; } r_list_sort (fcn->bbs, bb_cmp); r_list_foreach (fcn->bbs, iter, b) { if (one) { if (bbaddr != UT64_MAX && (bbaddr < b->addr || bbaddr >= (b->addr + b->size))) { continue; } } switch (mode) { case 'r': if (b->jump == UT64_MAX) { ut64 retaddr = 
b->addr; if (b->op_pos) { retaddr += b->op_pos[b->ninstr - 2]; } if (!strcmp (input, "*")) { r_cons_printf ("db 0x%08"PFMT64x"\n", retaddr); } else if (!strcmp (input, "-*")) { r_cons_printf ("db-0x%08"PFMT64x"\n", retaddr); } else { r_cons_printf ("0x%08"PFMT64x"\n", retaddr); } } break; case '*': r_cons_printf ("f bb.%05" PFMT64x " = 0x%08" PFMT64x "\n", b->addr & 0xFFFFF, b->addr); break; case 'q': r_cons_printf ("0x%08" PFMT64x "\n", b->addr); break; case 'j': //r_cons_printf ("%" PFMT64d "%s", b->addr, iter->n? ",": ""); { RListIter *iter2; RAnalBlock *b2; int inputs = 0; int outputs = 0; r_list_foreach (fcn->bbs, iter2, b2) { if (b2->jump == b->addr) { inputs++; } if (b2->fail == b->addr) { inputs++; } } if (b->jump != UT64_MAX) { outputs ++; } if (b->fail != UT64_MAX) { outputs ++; } r_cons_printf ("%s{", firstItem? "": ","); firstItem = false; if (b->jump != UT64_MAX) { r_cons_printf ("\"jump\":%"PFMT64d",", b->jump); } if (b->fail != UT64_MAX) { r_cons_printf ("\"fail\":%"PFMT64d",", b->fail); } r_cons_printf ("\"addr\":%" PFMT64d ",\"size\":%d,\"inputs\":%d,\"outputs\":%d,\"ninstr\":%d,\"traced\":%s}", b->addr, b->size, inputs, outputs, b->ninstr, r_str_bool (b->traced)); } break; case 'i': { RListIter *iter2; RAnalBlock *b2; int inputs = 0; int outputs = 0; r_list_foreach (fcn->bbs, iter2, b2) { if (b2->jump == b->addr) { inputs++; } if (b2->fail == b->addr) { inputs++; } } if (b->jump != UT64_MAX) { outputs ++; } if (b->fail != UT64_MAX) { outputs ++; } firstItem = false; if (b->jump != UT64_MAX) { r_cons_printf ("jump: 0x%08"PFMT64x"\n", b->jump); } if (b->fail != UT64_MAX) { r_cons_printf ("fail: 0x%08"PFMT64x"\n", b->fail); } r_cons_printf ("addr: 0x%08"PFMT64x"\nsize: %d\ninputs: %d\noutputs: %d\nninstr: %d\ntraced: %s\n", b->addr, b->size, inputs, outputs, b->ninstr, r_str_bool (b->traced)); } break; default: tp = r_debug_trace_get (core->dbg, b->addr); r_cons_printf ("0x%08" PFMT64x " 0x%08" PFMT64x " %02X:%04X %d", b->addr, b->addr + b->size, tp? 
tp->times: 0, tp? tp->count: 0, b->size); if (b->jump != UT64_MAX) { r_cons_printf (" j 0x%08" PFMT64x, b->jump); } if (b->fail != UT64_MAX) { r_cons_printf (" f 0x%08" PFMT64x, b->fail); } r_cons_newline (); break; } } if (mode == 'j') { r_cons_printf ("]\n"); } return true; } static bool anal_bb_edge (RCore *core, const char *input) { // "afbe" switch-bb-addr case-bb-addr char *arg = strdup (r_str_trim_ro(input)); char *sp = strchr (arg, ' '); if (sp) { *sp++ = 0; ut64 sw_at = r_num_math (core->num, arg); ut64 cs_at = r_num_math (core->num, sp); RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, sw_at, 0); if (fcn) { RAnalBlock *bb; RListIter *iter; r_list_foreach (fcn->bbs, iter, bb) { if (sw_at >= bb->addr && sw_at < (bb->addr + bb->size)) { if (!bb->switch_op) { bb->switch_op = r_anal_switch_op_new ( sw_at, 0, 0); } r_anal_switch_op_add_case (bb->switch_op, cs_at, 0, cs_at); } } free (arg); return true; } } free (arg); return false; } static bool anal_fcn_del_bb(RCore *core, const char *input) { ut64 addr = r_num_math (core->num, input); if (!addr) { addr = core->offset; } RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, -1); if (fcn) { if (!strcmp (input, "*")) { r_list_free (fcn->bbs); fcn->bbs = NULL; } else { RAnalBlock *b; RListIter *iter; r_list_foreach (fcn->bbs, iter, b) { if (b->addr == addr) { r_list_delete (fcn->bbs, iter); return true; } } eprintf ("Cannot find basic block\n"); } } else { eprintf ("Cannot find function\n"); } return false; } static int anal_fcn_add_bb(RCore *core, const char *input) { // fcn_addr bb_addr bb_size [jump] [fail] char *ptr; const char *ptr2 = NULL; ut64 fcnaddr = -1LL, addr = -1LL; ut64 size = 0LL; ut64 jump = UT64_MAX; ut64 fail = UT64_MAX; int type = R_ANAL_BB_TYPE_NULL; RAnalFunction *fcn = NULL; RAnalDiff *diff = NULL; while (*input == ' ') input++; ptr = strdup (input); switch (r_str_word_set0 (ptr)) { case 7: ptr2 = r_str_word_get0 (ptr, 6); if (!(diff = r_anal_diff_new ())) { eprintf ("error: Cannot 
init RAnalDiff\n"); free (ptr); return false; } if (ptr2[0] == 'm') { diff->type = R_ANAL_DIFF_TYPE_MATCH; } else if (ptr2[0] == 'u') { diff->type = R_ANAL_DIFF_TYPE_UNMATCH; } case 6: ptr2 = r_str_word_get0 (ptr, 5); if (strchr (ptr2, 'h')) { type |= R_ANAL_BB_TYPE_HEAD; } if (strchr (ptr2, 'b')) { type |= R_ANAL_BB_TYPE_BODY; } if (strchr (ptr2, 'l')) { type |= R_ANAL_BB_TYPE_LAST; } if (strchr (ptr2, 'f')) { type |= R_ANAL_BB_TYPE_FOOT; } case 5: // get fail fail = r_num_math (core->num, r_str_word_get0 (ptr, 4)); case 4: // get jump jump = r_num_math (core->num, r_str_word_get0 (ptr, 3)); case 3: // get size size = r_num_math (core->num, r_str_word_get0 (ptr, 2)); case 2: // get addr addr = r_num_math (core->num, r_str_word_get0 (ptr, 1)); case 1: // get fcnaddr fcnaddr = r_num_math (core->num, r_str_word_get0 (ptr, 0)); } fcn = r_anal_get_fcn_in (core->anal, fcnaddr, 0); if (fcn) { int ret = r_anal_fcn_add_bb (core->anal, fcn, addr, size, jump, fail, type, diff); if (!ret) { eprintf ("Cannot add basic block\n"); } } else { eprintf ("Cannot find function at 0x%" PFMT64x "\n", fcnaddr); } r_anal_diff_free (diff); free (ptr); return true; } static void r_core_anal_nofunclist (RCore *core, const char *input) { int minlen = (int)(input[0]==' ') ? 
r_num_math (core->num, input + 1): 16; ut64 code_size = r_num_get (core->num, "$SS"); ut64 base_addr = r_num_get (core->num, "$S"); ut64 chunk_size, chunk_offset, i; RListIter *iter, *iter2; RAnalFunction *fcn; RAnalBlock *b; char* bitmap; int counter; if (minlen < 1) { minlen = 1; } if (code_size < 1) { return; } bitmap = calloc (1, code_size+64); if (!bitmap) { return; } // for each function r_list_foreach (core->anal->fcns, iter, fcn) { // for each basic block in the function r_list_foreach (fcn->bbs, iter2, b) { // if it is not withing range, continue if ((fcn->addr < base_addr) || (fcn->addr >= base_addr+code_size)) continue; // otherwise mark each byte in the BB in the bitmap for (counter = 0; counter < b->size; counter++) { bitmap[b->addr+counter-base_addr] = '='; } // finally, add a special marker to show the beginning of a // function bitmap[fcn->addr-base_addr] = 'F'; } } // Now we print the list of memory regions that are not assigned to a function chunk_size = 0; chunk_offset = 0; for (i = 0; i < code_size; i++) { if (bitmap[i]){ // We only print a region is its size is bigger than 15 bytes if (chunk_size >= minlen){ fcn = r_anal_get_fcn_in (core->anal, base_addr+chunk_offset, R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM); if (fcn) { r_cons_printf ("0x%08"PFMT64x" %6d %s\n", base_addr+chunk_offset, chunk_size, fcn->name); } else { r_cons_printf ("0x%08"PFMT64x" %6d\n", base_addr+chunk_offset, chunk_size); } } chunk_size = 0; chunk_offset = i+1; continue; } chunk_size+=1; } if (chunk_size >= 16) { fcn = r_anal_get_fcn_in (core->anal, base_addr+chunk_offset, R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM); if (fcn) { r_cons_printf ("0x%08"PFMT64x" %6d %s\n", base_addr+chunk_offset, chunk_size, fcn->name); } else { r_cons_printf ("0x%08"PFMT64x" %6d\n", base_addr+chunk_offset, chunk_size); } } free(bitmap); } static void r_core_anal_fmap (RCore *core, const char *input) { int show_color = r_config_get_i (core->config, "scr.color"); int cols = r_config_get_i 
(core->config, "hex.cols") * 4; ut64 code_size = r_num_get (core->num, "$SS"); ut64 base_addr = r_num_get (core->num, "$S"); RListIter *iter, *iter2; RAnalFunction *fcn; RAnalBlock *b; char* bitmap; int assigned; ut64 i; if (code_size < 1) { return; } bitmap = calloc (1, code_size+64); if (!bitmap) { return; } // for each function r_list_foreach (core->anal->fcns, iter, fcn) { // for each basic block in the function r_list_foreach (fcn->bbs, iter2, b) { // if it is not within range, continue if ((fcn->addr < base_addr) || (fcn->addr >= base_addr+code_size)) continue; // otherwise mark each byte in the BB in the bitmap int counter = 1; for (counter = 0; counter < b->size; counter++) { bitmap[b->addr+counter-base_addr] = '='; } bitmap[fcn->addr-base_addr] = 'F'; } } // print the bitmap assigned = 0; if (cols < 1) { cols = 1; } for (i = 0; i < code_size; i += 1) { if (!(i % cols)) { r_cons_printf ("\n0x%08"PFMT64x" ", base_addr+i); } if (bitmap[i]) { assigned++; } if (show_color) { if (bitmap[i]) { r_cons_printf ("%s%c\x1b[0m", Color_GREEN, bitmap[i]); } else { r_cons_printf ("."); } } else { r_cons_printf ("%c", bitmap[i] ? bitmap[i] : '.' 
); } } r_cons_printf ("\n%d / %d (%.2lf%%) bytes assigned to a function\n", assigned, code_size, 100.0*( (float) assigned) / code_size); free(bitmap); } static bool fcnNeedsPrefix(const char *name) { if (!strncmp (name, "entry", 5)) { return false; } if (!strncmp (name, "main", 4)) { return false; } return (!strchr (name, '.')); } /* TODO: move into r_anal_fcn_rename(); */ static bool setFunctionName(RCore *core, ut64 off, const char *_name, bool prefix) { char *name, *oname, *nname = NULL; RAnalFunction *fcn; if (!core || !_name) { return false; } const char *fcnpfx = r_config_get (core->config, "anal.fcnprefix"); if (!fcnpfx) { fcnpfx = "fcn"; } if (r_reg_get (core->anal->reg, _name, -1)) { name = r_str_newf ("%s.%s", fcnpfx, _name); } else { name = strdup (_name); } fcn = r_anal_get_fcn_in (core->anal, off, R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM | R_ANAL_FCN_TYPE_LOC); if (!fcn) { return false; } if (prefix && fcnNeedsPrefix (name)) { nname = r_str_newf ("%s.%s", fcnpfx, name); } else { nname = strdup (name); } oname = fcn->name; r_flag_rename (core->flags, r_flag_get (core->flags, fcn->name), nname); fcn->name = strdup (nname); if (core->anal->cb.on_fcn_rename) { core->anal->cb.on_fcn_rename (core->anal, core->anal->user, fcn, nname); } free (oname); free (nname); free (name); return true; } static void afCc(RCore *core, const char *input) { ut64 addr; RAnalFunction *fcn; if (*input == ' ') { addr = r_num_math (core->num, input); } else { addr = core->offset; } if (addr == 0LL) { fcn = r_anal_fcn_find_name (core->anal, input + 3); } else { fcn = r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL); } if (fcn) { ut32 totalCycles = r_anal_fcn_cost (core->anal, fcn); // FIXME: This defeats the purpose of the function, but afC is used in project files. // cf. 
canal.c r_cons_printf ("%d\n", totalCycles); } else { eprintf ("Cannot find function\n"); } } static int cmd_anal_fcn(RCore *core, const char *input) { char i; r_cons_break_timeout (r_config_get_i (core->config, "anal.timeout")); switch (input[1]) { case 'f': // "aff" r_anal_fcn_fit_overlaps (core->anal, NULL); break; case 'a': if (input[2] == 'l') { // afal : list function call arguments int show_args = r_config_get_i (core->config, "dbg.funcarg"); if (show_args) { r_core_print_func_args (core); } break; } case 'd': // "afd" { ut64 addr = 0; if (input[2] == '?') { eprintf ("afd [offset]\n"); } else if (input[2] == ' ') { addr = r_num_math (core->num, input + 2); } else { addr = core->offset; } RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, 0); if (fcn) { if (fcn->addr != addr) { r_cons_printf ("%s + %d\n", fcn->name, (int)(addr - fcn->addr)); } else { r_cons_println (fcn->name); } } else { eprintf ("Cannot find function\n"); } } break; case '-': // "af-" if (!input[2] || !strcmp (input + 2, "*")) { r_anal_fcn_del_locs (core->anal, UT64_MAX); r_anal_fcn_del (core->anal, UT64_MAX); } else { ut64 addr = input[2] ? 
r_num_math (core->num, input + 2) : core->offset;
			r_anal_fcn_del_locs (core->anal, addr);
			r_anal_fcn_del (core->anal, addr);
		}
		break;
	case 'u': // "afu" — resize/reanalyze the function up to the given end address
		{
		ut64 addr = core->offset;
		ut64 addr_end = r_num_math (core->num, input + 2);
		if (addr_end < addr) {
			eprintf ("Invalid address ranges\n");
		} else {
			int depth = 1;
			ut64 a, b;
			const char *c;
			// temporarily constrain anal.from/anal.to/anal.limits to the range
			a = r_config_get_i (core->config, "anal.from");
			b = r_config_get_i (core->config, "anal.to");
			c = r_config_get (core->config, "anal.limits");
			r_config_set_i (core->config, "anal.from", addr);
			r_config_set_i (core->config, "anal.to", addr_end);
			r_config_set (core->config, "anal.limits", "true");
			RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, 0);
			if (fcn) {
				r_anal_fcn_resize (core->anal, fcn, addr_end - addr);
			}
			r_core_anal_fcn (core, addr, UT64_MAX, R_ANAL_REF_TYPE_NULL, depth);
			fcn = r_anal_get_fcn_in (core->anal, addr, 0);
			if (fcn) {
				r_anal_fcn_resize (core->anal, fcn, addr_end - addr);
			}
			// restore the previous limit configuration
			r_config_set_i (core->config, "anal.from", a);
			r_config_set_i (core->config, "anal.to", b);
			r_config_set (core->config, "anal.limits", c? c: "");
		}
		}
		break;
	case '+': { // "af+" — manually define a function: addr [name] [type] [diff] [size]
		if (input[2] != ' ') {
			eprintf ("Missing arguments\n");
			return false;
		}
		char *ptr = strdup (input + 3);
		const char *ptr2;
		int n = r_str_word_set0 (ptr);
		const char *name = NULL;
		ut64 addr = UT64_MAX;
		ut64 size = 0LL;
		RAnalDiff *diff = NULL;
		int type = R_ANAL_FCN_TYPE_FCN;
		if (n > 1) {
			// intentional fallthrough: each extra word cascades down
			switch (n) {
			case 5:
				size = r_num_math (core->num, r_str_word_get0 (ptr, 4));
			case 4:
				ptr2 = r_str_word_get0 (ptr, 3);
				if (!(diff = r_anal_diff_new ())) {
					eprintf ("error: Cannot init RAnalDiff\n");
					free (ptr);
					return false;
				}
				if (ptr2[0] == 'm') {
					diff->type = R_ANAL_DIFF_TYPE_MATCH;
				} else if (ptr2[0] == 'u') {
					diff->type = R_ANAL_DIFF_TYPE_UNMATCH;
				}
			case 3:
				ptr2 = r_str_word_get0 (ptr, 2);
				if (strchr (ptr2, 'l')) {
					type = R_ANAL_FCN_TYPE_LOC;
				} else if (strchr (ptr2, 'i')) {
					type = R_ANAL_FCN_TYPE_IMP;
				} else if (strchr (ptr2, 's')) {
					type = R_ANAL_FCN_TYPE_SYM;
				} else {
					type = R_ANAL_FCN_TYPE_FCN;
				}
			case 2:
				name = r_str_word_get0 (ptr, 1);
			case 1:
				addr = r_num_math (core->num, r_str_word_get0 (ptr, 0));
			}
			if (!r_anal_fcn_add (core->anal, addr, size, name, type, diff)) {
				eprintf ("Cannot add function (duplicated)\n");
			}
		}
		r_anal_diff_free (diff);
		free (ptr);
		}
		break;
	case 'o': // "afo" — print the entry address of the function at/named
		{
		RAnalFunction *fcn;
		ut64 addr = core->offset;
		if (input[2] == ' ')
			addr = r_num_math (core->num, input + 3);
		if (addr == 0LL) {
			fcn = r_anal_fcn_find_name (core->anal, input + 3);
		} else {
			fcn = r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL);
		}
		if (fcn) {
			r_cons_printf ("0x%08" PFMT64x "\n", fcn->addr);
		}
		}
		break;
	case 'i': // "afi" — function information
		switch (input[2]) {
		case '?':
			r_core_cmd_help (core, help_msg_afi);
			break;
		case '.': // "afi." — print just the name of the current function
			{
			ut64 addr = core->offset;
			if (input[3] == ' ') {
				addr = r_num_math (core->num, input + 3);
			}
			RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL);
			if (fcn) {
				r_cons_printf ("%s\n", fcn->name);
			}
			}
			break;
		case 'l': // "afil"
			if (input[3] == '?') {
				// TODO #7967 help refactor
				help_msg_afll[1] = "afil";
				r_core_cmd_help (core, help_msg_afll);
				break;
			}
			/* fallthrough */
		case 'j': // "afij"
		case '*': // "afi*"
			r_core_anal_fcn_list (core, input + 3, input + 2);
			break;
		default:
			i = 1;
			r_core_anal_fcn_list (core, input + 2, &i);
			break;
		}
		break;
	case 'l': // "afl" — list functions
		switch (input[2]) {
		case '?':
			r_core_cmd_help (core, help_msg_afl);
			break;
		case 'l': // "afll"
			if (input[3] == '?') {
				// TODO #7967 help refactor
				help_msg_afll[1] = "afll";
				r_core_cmd_help (core, help_msg_afll);
				break;
			}
			/* fallthrough */
		case 'j': // "aflj"
		case 'q': // "aflq"
		case 's': // "afls"
		case '*': // "afl*"
			r_core_anal_fcn_list (core, NULL, input + 2);
			break;
		case 'c': // "aflc" — just the count
			r_cons_printf ("%d\n", r_list_length (core->anal->fcns));
			break;
		default: // "afl "
			r_core_anal_fcn_list (core, NULL, "o");
			break;
		}
		break;
	case 's': // "afs" — get/set the function signature
		{
		ut64 addr;
		RAnalFunction *f;
		const char *arg = input + 3;
		if (input[2] && (addr = r_num_math (core->num, arg))) {
			arg = strchr (arg, ' ');
			if (arg) {
				arg++;
			}
		} else {
			addr = core->offset;
		}
		if ((f = r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL))) {
			if (arg && *arg) {
				r_anal_str_to_fcn (core->anal, f, arg);
			} else {
				char *str = r_anal_fcn_to_string (core->anal, f);
				r_cons_println (str);
				free (str);
			}
		} else {
			eprintf ("No function defined at 0x%08" PFMT64x "\n", addr);
		}
		}
		break;
	case 'm': // "afm" - merge two functions
		r_core_anal_fcn_merge (core, core->offset, r_num_math (core->num, input + 2));
		break;
	case 'M': // "afM" - print functions map
		r_core_anal_fmap (core, input + 1);
		break;
	case 'v': // "afv"
		var_cmd (core, input + 2);
		break;
	case 't': // "aft"
		type_cmd (core, input + 2);
		break;
	case 'C': // "afC"
		if (input[2] == 'c')
{ // "afCc" (short form): cyclomatic complexity of the current function
		RAnalFunction *fcn;
		if ((fcn = r_anal_get_fcn_in (core->anal, core->offset, 0)) != NULL) {
			r_cons_printf ("%i\n", r_anal_fcn_cc (fcn));
		} else {
			// NOTE(review): "0x08%" looks like a typo for "0x%08" in this message
			eprintf ("Error: Cannot find function at 0x08%" PFMT64x "\n", core->offset);
		}
		} else if (input[2] == 'l') { // "afCl": loop count
			RAnalFunction *fcn;
			if ((fcn = r_anal_get_fcn_in (core->anal, core->offset, 0)) != NULL) {
				r_cons_printf ("%d\n", r_anal_fcn_loops (fcn));
			} else {
				eprintf ("Error: Cannot find function at 0x08%" PFMT64x "\n", core->offset);
			}
		} else if (input[2] == '?') {
			r_core_cmd_help (core, help_msg_afC);
		} else {
			afCc (core, input + 3);
		}
		break;
	case 'c':{ // "afc" — calling-convention management
		RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, 0);
		if (!fcn && !(input[2] == '?'|| input[2] == 'l' || input[2] == 'o')) {
			eprintf ("Cannot find function here\n");
			break;
		}
		switch (input[2]) {
		case '\0': // "afc"
			r_cons_println (fcn->cc);
			break;
		case ' ': { // "afc "
			char *cc = r_str_trim (strdup (input + 3));
			if (!r_anal_cc_exist (core->anal, cc)) {
				eprintf ("Unknown calling convention '%s'\n"
					"See afcl for available types\n", cc);
			} else {
				fcn->cc = r_str_const (r_anal_cc_to_constant (core->anal, cc));
			}
			break;
		}
		case 'a': // "afca""
			eprintf ("Todo\n");
			break;
		case 'l': // "afcl" list all function Calling conventions.
			sdb_foreach (core->anal->sdb_cc, cc_print, NULL);
			break;
		case 'o': { // "afco" — load conventions from an sdb file
			char *dbpath = r_str_trim (strdup (input + 3));
			if (r_file_exists (dbpath)) {
				Sdb *db = sdb_new (0, dbpath, 0);
				sdb_merge (core->anal->sdb_cc, db);
				sdb_close (db);
				sdb_free (db);
			}
			free (dbpath);
			break;
		}
		case 'r': { // "afcr" — show ret/arg registers of the current cc
			int i;
			char *out, *cmd, *regname, *tmp;
			char *subvec_str = r_str_new ("");
			char *json_str = r_str_new ("");
			// if json_str initialize to NULL, it's possible for afcrj to output a (NULL)
			// subvec_str and json_str should be valid until exiting this code block
			bool json = input[3] == 'j'? true: false;
			// i == 0 is the return reg, 1..6 int args, 7..11 float args
			for (i = 0; i <= 11; i++) {
				if (i == 0) {
					cmd = r_str_newf ("cc.%s.ret", fcn->cc);
				} else {
					cmd = r_str_newf ("cc.%s.arg%d", fcn->cc, i);
				}
				if (i < 7) {
					regname = r_str_new (cmd);
				} else {
					regname = r_str_newf ("cc.%s.float_arg%d", fcn->cc, i - 6);
				}
				out = sdb_querys (core->anal->sdb_cc, NULL, 0, cmd);
				free (cmd);
				if (out) {
					out[strlen (out) - 1] = 0; // strip trailing newline
					if (json) {
						tmp = subvec_str;
						subvec_str = r_str_newf ("%s,\"%s\"", subvec_str, out);
						free (tmp);
					} else {
						r_cons_printf ("%s: %s\n", regname, out);
					}
					free (out);
				}
				free (regname);
				if (!subvec_str[0]) {
					continue;
				}
				// flush the accumulated sub-vector at the group boundaries
				switch (i) {
				case 0: {
					tmp = json_str;
					json_str = r_str_newf ("%s,\"ret\":%s", json_str, subvec_str + 1);
					free (tmp);
					} break;
				case 6: {
					tmp = json_str;
					json_str = r_str_newf ("%s,\"args\":[%s]", json_str, subvec_str + 1);
					free (tmp);
					} break;
				case 11: {
					tmp = json_str;
					json_str = r_str_newf ("%s,\"float_args\":[%s]", json_str, subvec_str + 1);
					free (tmp);
					} break;
				default:
					continue;
				}
				free (subvec_str);
				subvec_str = r_str_new ("");
			}
			if (json && json_str[0]) {
				r_cons_printf ("{%s}\n", json_str + 1);
			}
			free (subvec_str);
			free (json_str);
		} break;
		case '?': // "afc?"
		default:
			r_core_cmd_help (core, help_msg_afc);
		}
		}break;
	case 'B': // "afB" // set function bits
		if (input[2] == ' ') {
			RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset,
				R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM);
			if (fcn) {
				int bits = atoi (input + 3);
				r_anal_hint_set_bits (core->anal, fcn->addr, bits);
				// restore default bits right after the function
				r_anal_hint_set_bits (core->anal,
					fcn->addr + r_anal_fcn_size (fcn), core->anal->bits);
				fcn->bits = bits;
			} else {
				eprintf ("Cannot find function to set bits\n");
			}
		} else {
			eprintf ("Usage: afB [bits]\n");
		}
		break;
	case 'b': // "afb" — basic-block subcommands
		switch (input[2]) {
		case '-': // "afb-"
			anal_fcn_del_bb (core, input + 3);
			break;
		case 'e': // "afbe"
			anal_bb_edge (core, input + 3);
			break;
		case 0:
		case ' ': // "afb "
		case 'q': // "afbq"
		case 'r': // "afbr"
		case '*': // "afb*"
		case 'j': // "afbj"
			anal_fcn_list_bb (core, input + 2, false);
			break;
		case 'i': // "afbi"
			anal_fcn_list_bb (core, input + 2, true);
			break;
		case '.': // "afb."
			anal_fcn_list_bb (core, input[2]? " $$": input + 2, true);
			break;
		case '+': // "afb+"
			anal_fcn_add_bb (core, input + 3);
			break;
		default:
		case '?':
			r_core_cmd_help (core, help_msg_afb);
			break;
		}
		break;
	case 'n': // "afn" — function naming
		switch (input[2]) {
		case 's': // "afns"
			free (r_core_anal_fcn_autoname (core, core->offset, 1));
			break;
		case 'a': // "afna" — suggest an automatic name
			{
			char *name = r_core_anal_fcn_autoname (core, core->offset, 0);
			if (name) {
				r_cons_printf ("afn %s 0x%08" PFMT64x "\n", name, core->offset);
				free (name);
			}
			}
			break;
		case 0: // "afn" — print current function name
			{
			RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1);
			if (fcn) {
				r_cons_printf ("%s\n", fcn->name);
			}
			}
			break;
		case ' ': // "afn " — rename: afn newname [off]
			{
			ut64 off = core->offset;
			char *p, *name = strdup (input + 3);
			if ((p = strchr (name, ' '))) {
				*p++ = 0;
				off = r_num_math (core->num, p);
			}
			if (*name) {
				if (!setFunctionName (core, off, name, false)) {
					eprintf ("Cannot find function '%s' at 0x%08" PFMT64x "\n", name, off);
				}
				free (name);
			} else {
				eprintf ("Usage: afn newname [off] # set new name to given function\n");
				free
(name);
			}
			}
			break;
		default:
			r_core_cmd_help (core, help_msg_afn);
			break;
		}
		break;
	case 'S': // afS" — set the maximum stack size of the current function
		{
		RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1);
		if (fcn) {
			fcn->maxstack = r_num_math (core->num, input + 3);
			//fcn->stack = fcn->maxstack;
		}
		}
		break;
#if 0
	/* this is undocumented and probably have no uses. plz discuss */
	case 'e': // "afe"
		{
		RAnalFunction *fcn;
		ut64 off = core->offset;
		char *p, *name = strdup ((input[2]&&input[3])? input + 3: "");
		if ((p = strchr (name, ' '))) {
			*p = 0;
			off = r_num_math (core->num, p + 1);
		}
		fcn = r_anal_get_fcn_in (core->anal, off, R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM);
		if (fcn) {
			RAnalBlock *b;
			RListIter *iter;
			RAnalRef *r;
			r_list_foreach (fcn->refs, iter, r) {
				r_cons_printf ("0x%08" PFMT64x " -%c 0x%08" PFMT64x "\n", r->at, r->type, r->addr);
			}
			r_list_foreach (fcn->bbs, iter, b) {
				int ok = 0;
				if (b->type == R_ANAL_BB_TYPE_LAST) ok = 1;
				if (b->type == R_ANAL_BB_TYPE_FOOT) ok = 1;
				if (b->jump == UT64_MAX && b->fail == UT64_MAX) ok = 1;
				if (ok) {
					r_cons_printf ("0x%08" PFMT64x " -r\n", b->addr);
					// TODO: check if destination is outside the function boundaries
				}
			}
		} else eprintf ("Cannot find function at 0x%08" PFMT64x "\n", core->offset);
		free (name);
		}
		break;
#endif
	case 'x': // "afx" — function cross-references
		switch (input[2]) {
		case '\0': // "afx"
		case 'j': // "afxj"
		case ' ': // "afx "
#if FCN_OLD
			if (input[2] == 'j') {
				r_cons_printf ("[");
			}
			// TODO: sdbize!
			// list xrefs from current address
			{
				ut64 addr = input[2]==' '? r_num_math (core->num, input + 2): core->offset;
				RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, R_ANAL_FCN_TYPE_NULL);
				if (fcn) {
					RAnalRef *ref;
					RListIter *iter;
					RList *refs = r_anal_fcn_get_refs (core->anal, fcn);
					r_list_foreach (refs, iter, ref) {
						if (input[2] == 'j') {
							r_cons_printf ("{\"type\":\"%c\",\"from\":%"PFMT64d",\"to\":%"PFMT64d"}%s",
								ref->type, ref->at, ref->addr, iter->n? ",": "");
						} else {
							r_cons_printf ("%c 0x%08" PFMT64x " -> 0x%08" PFMT64x "\n",
								ref->type, ref->at, ref->addr);
						}
					}
					r_list_free (refs);
				} else {
					eprintf ("Cannot find function at 0x%08"PFMT64x"\n", addr);
				}
			}
			if (input[2] == 'j') {
				r_cons_printf ("]\n");
			}
#else
#warning TODO_ FCNOLD sdbize xrefs here
			eprintf ("TODO\n");
#endif
			break;
		case 'c': // "afxc" add code xref
		case 'd': // "afxd"
		case 's': // "afxs"
		case 'C': { // "afxC"
			char *p;
			ut64 a, b;
			char *mi = strdup (input);
			if (mi && mi[3] == ' ' && (p = strchr (mi + 4, ' '))) {
				*p = 0;
				a = r_num_math (core->num, mi + 3);
				b = r_num_math (core->num, p + 1);
				r_anal_xrefs_set (core->anal, input[2], a, b);
			} else {
				r_core_cmd_help (core, help_msg_afx);
			}
			free (mi);
			} break;
		case '-': { // "afx-"
			char *p;
			ut64 a, b;
			char *mi = strdup (input + 3);
			if (mi && *mi == ' ' && (p = strchr (mi + 1, ' '))) {
				*p = 0;
				a = r_num_math (core->num, mi);
				b = r_num_math (core->num, p + 1);
				r_anal_xrefs_deln (core->anal, -1, a, b);
			} else {
				eprintf ("Usage: afx- [src] [dst]\n");
			}
			free (mi);
			} break;
		default:
		case '?': // "afx?"
			r_core_cmd_help (core, help_msg_afx);
			break;
		}
		break;
	case 'F': // "afF" — fold/unfold the current function in the graph
		{
		int val = input[2] && r_num_math (core->num, input + 2);
		RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, R_ANAL_FCN_TYPE_NULL);
		if (fcn) {
			fcn->folded = input[2]? val: !fcn->folded;
		}
		}
		break;
	case '?': // "af?"
		r_core_cmd_help (core, help_msg_af);
		break;
	case 'r': // "afr" // analyze function recursively
	case ' ': // "af "
	case '\0': // "af"
		{
		char *uaddr = NULL, *name = NULL;
		int depth = r_config_get_i (core->config, "anal.depth");
		bool analyze_recursively = r_config_get_i (core->config, "anal.calls");
		RAnalFunction *fcn;
		ut64 addr = core->offset;
		if (input[1] == 'r') {
			input++;
			analyze_recursively = true;
		}
		// first undefine
		if (input[0] && input[1] == ' ') {
			name = strdup (input + 2);
			uaddr = strchr (name, ' ');
			if (uaddr) {
				*uaddr++ = 0;
				addr = r_num_math (core->num, uaddr);
			}
			// depth = 1; // or 1?
			// disable hasnext
		}
		//r_core_anal_undefine (core, core->offset);
		r_core_anal_fcn (core, addr, UT64_MAX, R_ANAL_REF_TYPE_NULL, depth);
		fcn = r_anal_get_fcn_in (core->anal, addr, 0);
		if (fcn && r_config_get_i (core->config, "anal.vars")) {
			fcn_callconv (core, fcn);
		}
		if (fcn) { /* ensure we use a proper name */
			setFunctionName (core, addr, fcn->name, false);
		}
		if (analyze_recursively) {
			fcn = r_anal_get_fcn_in (core->anal, addr, 0); /// XXX wrong in case of nopskip
			if (fcn) {
				RAnalRef *ref;
				RListIter *iter;
				RList *refs = r_anal_fcn_get_refs (core->anal, fcn);
				r_list_foreach (refs, iter, ref) {
					if (ref->addr == UT64_MAX) {
						//eprintf ("Warning: ignore 0x%08"PFMT64x" call 0x%08"PFMT64x"\n", ref->at, ref->addr);
						continue;
					}
					if (ref->type != 'c' && ref->type != 'C') {
						/* only follow code/call references */
						continue;
					}
					if (!r_io_is_valid_offset (core->io, ref->addr, !core->anal->opt.noncode)) {
						continue;
					}
					r_core_anal_fcn (core, ref->addr, fcn->addr, R_ANAL_REF_TYPE_CALL, depth);
					/* use recursivity here */
#if 1
					RAnalFunction *f = r_anal_get_fcn_at (core->anal, ref->addr, 0);
					if (f) {
						RListIter *iter;
						RAnalRef *ref;
						RList *refs1 = r_anal_fcn_get_refs (core->anal, f);
						r_list_foreach (refs1, iter, ref) {
							if (!r_io_is_valid_offset (core->io, ref->addr, !core->anal->opt.noncode)) {
								continue;
							}
							if (ref->type != 'c' && ref->type != 'C') {
								continue;
							}
							r_core_anal_fcn (core, ref->addr, f->addr, R_ANAL_REF_TYPE_CALL, depth);
							// recursively follow fcn->refs again and again
						}
						r_list_free (refs1);
					} else {
						f = r_anal_get_fcn_in (core->anal, fcn->addr, 0);
						if (f) {
							/* cut function */
							r_anal_fcn_resize (core->anal, f, addr - fcn->addr);
							r_core_anal_fcn (core, ref->addr, fcn->addr, R_ANAL_REF_TYPE_CALL, depth);
							f = r_anal_get_fcn_at (core->anal, fcn->addr, 0);
						}
						if (!f) {
							eprintf ("Cannot find function at 0x%08" PFMT64x "\n", fcn->addr);
						}
					}
#endif
				}
				r_list_free (refs);
			}
		}
		if (name) {
			if (*name && !setFunctionName (core, addr, name, true)) {
				eprintf ("Cannot find function '%s' at 0x%08" PFMT64x "\n", name, (ut64)addr);
			}
			free (name);
		}
		flag_every_function (core);
	}
	// NOTE(review): the case above falls through into default and makes
	// "af" return false — confirm this is intended
	default:
		return false;
		break;
	}
	return true;
}

// size: 0: bits; -1: any; >0: exact size
/* List registers of the anal (not debug) register set; temporarily swaps
 * core->dbg->reg to core->anal->reg around the r_debug_reg_list calls. */
static void __anal_reg_list(RCore *core, int type, int bits, char mode) {
	RReg *hack = core->dbg->reg;
	const char *use_color;
	int use_colors = r_config_get_i (core->config, "scr.color");
	if (use_colors) {
#undef ConsP
#define ConsP(x) (core->cons && core->cons->pal.x)? core->cons->pal.x
		use_color = ConsP (creg) : Color_BWHITE;
	} else {
		use_color = NULL;
	}
	if (bits < 0) {
		// TODO Change the `size` argument of r_debug_reg_list to use -1 for any and 0 for anal->bits
		bits = 0;
	} else if (!bits) {
		bits = core->anal->bits;
	}
	if (core->anal) {
		core->dbg->reg = core->anal->reg;
		if (core->anal->cur && core->anal->cur->arch) {
			/* workaround for thumb */
			if (!strcmp (core->anal->cur->arch, "arm") && bits == 16) {
				bits = 32;
			}
			/* workaround for 6502 */
			if (!strcmp (core->anal->cur->arch, "6502") && bits == 8) {
				r_debug_reg_list (core->dbg, R_REG_TYPE_GPR, 16, mode, use_color); // XXX detect which one is current usage
			}
			if (!strcmp (core->anal->cur->arch, "avr") && bits == 8) {
				r_debug_reg_list (core->dbg, R_REG_TYPE_GPR, 16, mode, use_color); // XXX detect which one is current usage
			}
		}
	}
	if (mode == '=') {
		int pcbits = 0;
		const char *pcname = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
		// NOTE(review): `reg` is dereferenced without a NULL check — if the
		// profile has no PC register this crashes; confirm upstream behavior
		RRegItem *reg = r_reg_get (core->anal->reg, pcname, 0);
		if (bits != reg->size) {
			pcbits = reg->size;
		}
		if (pcbits) {
			r_debug_reg_list (core->dbg, R_REG_TYPE_GPR, pcbits, 2, use_color); // XXX detect which one is current usage
		}
	}
	r_debug_reg_list (core->dbg, type, bits, mode, use_color);
	core->dbg->reg = hack;
}

// XXX dup from drp :OOO
void cmd_anal_reg(RCore *core, const char *str) {
	int size = 0, i, type = R_REG_TYPE_GPR;
	int bits = (core->anal->bits & R_SYS_BITS_64)?
	64: 32;
	int use_colors = r_config_get_i (core->config, "scr.color");
	struct r_reg_item_t *r;
	const char *use_color;
	const char *name;
	char *arg;
	if (use_colors) {
#define ConsP(x) (core->cons && core->cons->pal.x)? core->cons->pal.x
		use_color = ConsP (creg) : Color_BWHITE;
	} else {
		use_color = NULL;
	}
	switch (str[0]) {
	case 'l': // "arl" — list all GPR register names
		{
		RRegSet *rs = r_reg_regset_get (core->anal->reg, R_REG_TYPE_GPR);
		if (rs) {
			RRegItem *r;
			RListIter *iter;
			r_list_foreach (rs->regs, iter, r) {
				r_cons_println (r->name);
			}
		}
		}
		break;
	case '0': // "ar0" — clear all register values
		r_reg_arena_zero (core->anal->reg);
		break;
	case 'C': // "arC" — show the register profile comment
		if (core->anal->reg->reg_profile_cmt) {
			r_cons_println (core->anal->reg->reg_profile_cmt);
		}
		break;
	case 'w': // "arw" — set contents of the register arena from hexpairs
		switch (str[1]) {
		case '?': {
			r_core_cmd_help (core, help_msg_arw);
			break;
		}
		case ' ':
			r_reg_arena_set_bytes (core->anal->reg, str + 1);
			break;
		default:
			r_core_cmd_help (core, help_msg_arw);
			break;
		}
		break;
	case 'a': // "ara" — register arena stack management
		switch (str[1]) {
		case '?': // "ara?"
			r_core_cmd_help (core, help_msg_ara);
			break;
		case 's': // "aras"
			r_reg_arena_swap (core->anal->reg, false);
			break;
		case '+': // "ara+"
			r_reg_arena_push (core->anal->reg);
			break;
		case '-': // "ara-"
			r_reg_arena_pop (core->anal->reg);
			break;
		default: {
			// dump all arenas of all register sets
			int i, j;
			RRegArena *a;
			RListIter *iter;
			for (i = 0; i < R_REG_TYPE_LAST; i++) {
				RRegSet *rs = &core->anal->reg->regset[i];
				j = 0;
				r_list_foreach (rs->pool, iter, a) {
					r_cons_printf ("%s %p %d %d %s %d\n",
						(a == rs->arena)? "*": ".", a, i, j, r_reg_get_type (i), a->size);
					j++;
				}
			}
			} break;
		}
		break;
	case '?': // "ar?"
		if (str[1]) {
			// "ar? reg" — print the value of a single register
			ut64 off = r_reg_getv (core->anal->reg, str + 1);
			r_cons_printf ("0x%08" PFMT64x "\n", off);
		} else {
			r_core_cmd_help (core, help_msg_ar);
		}
		break;
	case 'r': // "arr"
		r_core_debug_rr (core, core->anal->reg);
		break;
	case 'S': { // "arS" — print the size of the GPR arena
		int sz;
		ut8 *buf = r_reg_get_bytes (core->anal->reg, R_REG_TYPE_GPR, &sz);
		r_cons_printf ("%d\n", sz);
		free (buf);
		} break;
	case 'b': { // "arb" WORK IN PROGRESS // DEBUG COMMAND
		int len, type = R_REG_TYPE_GPR;
		arg = strchr (str, ' ');
		if (arg) {
			char *string = r_str_trim (strdup (arg + 1));
			if (string) {
				type = r_reg_type_by_name (string);
				if (type == -1 && string[0] != 'a') {
					type = R_REG_TYPE_GPR;
				}
				free (string);
			}
		}
		ut8 *buf = r_reg_get_bytes (core->dbg->reg, type, &len);
		if (buf) {
			//r_print_hexdump (core->print, 0LL, buf, len, 16, 16);
			r_print_hexdump (core->print, 0LL, buf, len, 32, 4, 1);
			free (buf);
		}
		} break;
	case 'c': // "arc" — condition flags
		// TODO: set flag values with drc zf=1
		{
		RRegItem *r;
		const char *name = str + 1;
		while (*name == ' ') name++;
		if (*name && name[1]) {
			r = r_reg_cond_get (core->dbg->reg, name);
			if (r) {
				r_cons_println (r->name);
			} else {
				int id = r_reg_cond_from_string (name);
				RRegFlags *rf = r_reg_cond_retrieve (core->dbg->reg, NULL);
				if (rf) {
					int o = r_reg_cond_bits (core->dbg->reg, id, rf);
					core->num->value = o; // ORLY?
					r_cons_printf ("%d\n", o);
					free (rf);
				} else {
					eprintf ("unknown conditional or flag register\n");
				}
			}
		} else {
			RRegFlags *rf = r_reg_cond_retrieve (core->dbg->reg, NULL);
			if (rf) {
				r_cons_printf ("| s:%d z:%d c:%d o:%d p:%d\n", rf->s, rf->z, rf->c, rf->o, rf->p);
				if (*name == '=') {
					for (i = 0; i < R_REG_COND_LAST; i++) {
						r_cons_printf ("%s:%d ", r_reg_cond_to_string (i),
							r_reg_cond_bits (core->dbg->reg, i, rf));
					}
					r_cons_newline ();
				} else {
					for (i = 0; i < R_REG_COND_LAST; i++) {
						r_cons_printf ("%d %s\n", r_reg_cond_bits (core->dbg->reg, i, rf),
							r_reg_cond_to_string (i));
					}
				}
				free (rf);
			}
		}
		}
		break;
	case 's': // "ars" — register state stack
		switch (str[1]) {
		case '-': // "ars-"
			r_reg_arena_pop (core->dbg->reg);
			// restore debug registers if in debugger mode
			r_debug_reg_sync (core->dbg, R_REG_TYPE_GPR, true);
			break;
		case '+': // "ars+"
			r_reg_arena_push (core->dbg->reg);
			break;
		case '?': { // "ars?"
			// TODO #7967 help refactor: dup from drp
			const char *help_msg[] = {
				"Usage:", "drs", " # Register states commands",
				"drs", "", "List register stack",
				"drs+", "", "Push register state",
				"drs-", "", "Pop register state",
				NULL };
			r_core_cmd_help (core, help_msg);
			} break;
		default:
			r_cons_printf ("%d\n", r_list_length (core->dbg->reg->regset[0].pool));
			break;
		}
		break;
	case 'p': // "arp"
		// XXX we have to break out .h for these cmd_xxx files.
		cmd_reg_profile (core, 'a', str);
		break;
	case 't': // "art" — list register types
		for (i = 0; (name = r_reg_get_type (i)); i++)
			r_cons_println (name);
		break;
	case 'n': // "arn" — resolve a register role alias to its real name
		if (*(str + 1) == '\0') {
			eprintf ("Oops. try arn [PC|SP|BP|A0|A1|A2|A3|A4|R0|R1|ZF|SF|NF|OF]\n");
			break;
		}
		name = r_reg_get_name (core->dbg->reg, r_reg_get_name_idx (str + 2));
		if (name && *name) {
			r_cons_println (name);
		} else {
			eprintf ("Oops. try arn [PC|SP|BP|A0|A1|A2|A3|A4|R0|R1|ZF|SF|NF|OF]\n");
		}
		break;
	case 'd': // "ard"
		r_debug_reg_list (core->dbg, R_REG_TYPE_GPR, bits, 3, use_color); // XXX detect which one is current usage
		break;
	case 'o': // "aro" — show the previous (swapped) register values
		r_reg_arena_swap (core->dbg->reg, false);
		r_debug_reg_list (core->dbg, R_REG_TYPE_GPR, bits, 0, use_color); // XXX detect which one is current usage
		r_reg_arena_swap (core->dbg->reg, false);
		break;
	case '=': // "ar=" — columns view; optional "ar=bits:reg1 reg2" filter
		{
		char *p = NULL;
		char *bits = NULL;
		if (str[1]) {
			p = strdup (str + 1);
			if (str[1] != ':') {
				// Bits were specified
				// NOTE(review): strtok keeps global state (not reentrant)
				bits = strtok (p, ":");
				if (r_str_isnumber (bits)) {
					st64 sz = r_num_math (core->num, bits);
					if (sz > 0) {
						size = sz;
					}
				} else {
					r_core_cmd_help (core, help_msg_ar);
					break;
				}
			}
			int len = bits ? strlen (bits) : 0;
			if (str[len + 1] == ':') {
				// We have some regs
				char *regs = bits ? strtok (NULL, ":") : strtok ((char *)str + 1, ":");
				char *reg = strtok (regs, " ");
				RList *q_regs = r_list_new ();
				if (q_regs) {
					while (reg) {
						r_list_append (q_regs, reg);
						reg = strtok (NULL, " ");
					}
					core->dbg->q_regs = q_regs;
				}
			}
		}
		__anal_reg_list (core, type, size, 2);
		// NOTE(review): an empty q_regs list is never freed here — confirm
		if (!r_list_empty (core->dbg->q_regs)) {
			r_list_free (core->dbg->q_regs);
		}
		core->dbg->q_regs = NULL;
		free (p);
		}
		break;
	case '-': // "ar-"
	case '*': // "ar*"
	case 'R': // "arR"
	case 'j': // "arj"
	case '\0': // "ar"
		__anal_reg_list (core, type, size, str[0]);
		break;
	case ' ': { // "ar " — "ar reg=value" assignment or filtered listing
		arg = strchr (str + 1, '=');
		if (arg) {
			char *ostr, *regname;
			*arg = 0;
			ostr = r_str_trim (strdup (str + 1));
			regname = r_str_trim_nc (ostr);
			r = r_reg_get (core->dbg->reg, regname, -1);
			if (!r) {
				// fall back to role aliases like PC/SP/BP
				int role = r_reg_get_name_idx (regname);
				if (role != -1) {
					const char *alias = r_reg_get_name (core->dbg->reg, role);
					r = r_reg_get (core->dbg->reg, alias, -1);
				}
			}
			if (r) {
				//eprintf ("%s 0x%08"PFMT64x" -> ", str,
				//	r_reg_get_value (core->dbg->reg, r));
				r_reg_set_value (core->dbg->reg, r,
					r_num_math (core->num, arg + 1));
				r_debug_reg_sync (core->dbg, R_REG_TYPE_ALL, true);
				//eprintf ("0x%08"PFMT64x"\n",
				//	r_reg_get_value (core->dbg->reg, r));
				r_core_cmdf (core, ".dr*%d", bits);
			} else {
				eprintf ("ar: Unknown register '%s'\n", regname);
			}
			free (ostr);
			return;
		}
		char name[32];
		int i = 1, j;
		// comma-separated list of registers / sizes / types
		while (str[i]) {
			if (str[i] == ',') {
				i++;
			} else {
				for (j = i; str[++j] && str[j] != ','; );
				if (j - i + 1 <= sizeof name) {
					r_str_ncpy (name, str + i, j - i + 1);
					if (IS_DIGIT (name[0])) { // e.g. ar 32
						__anal_reg_list (core, R_REG_TYPE_GPR, atoi (name), '\0');
					} else if (showreg (core, name) > 0) { // e.g. ar rax
					} else { // e.g. ar gpr ; ar all
						type = r_reg_type_by_name (name);
						// TODO differentiate ALL and illegal register types and print error message for the latter
						__anal_reg_list (core, type, -1, '\0');
					}
				}
				i = j;
			}
		}
		}
	}
}

/* Create and set up core->anal->esil, seed PC from the binary entrypoint
 * (or the current offset) and return that address; UT64_MAX on failure. */
static ut64 initializeEsil(RCore *core) {
	const char *name = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
	RAnalEsil *esil = core->anal->esil;
	int romem = r_config_get_i (core->config, "esil.romem");
	int stats = r_config_get_i (core->config, "esil.stats");
	int iotrap = r_config_get_i (core->config, "esil.iotrap");
	int exectrap = r_config_get_i (core->config, "esil.exectrap");
	int stacksize = r_config_get_i (core->config, "esil.stack.depth");
	int noNULL = r_config_get_i (core->config, "esil.noNULL");
	unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size");
	if (!(core->anal->esil = r_anal_esil_new (stacksize, iotrap, addrsize))) {
		return UT64_MAX;
	}
	ut64 addr;
	esil = core->anal->esil;
	r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io
	esil->exectrap = exectrap;
	RList *entries = r_bin_get_entries (core->bin);
	RBinAddr *entry = NULL;
	RBinInfo *info = NULL;
	if (entries && !r_list_empty (entries)) {
		entry = (RBinAddr *)r_list_pop (entries);
		info = r_bin_get_info (core->bin);
		addr = info->has_va?
entry->vaddr: entry->paddr; r_list_push (entries, entry); } else { addr = core->offset; } r_reg_setv (core->anal->reg, name, addr); // set memory read only return addr; } R_API int r_core_esil_step(RCore *core, ut64 until_addr, const char *until_expr, ut64 *prev_addr) { #define return_tail(x) { tail_return_value = x; goto tail_return; } int tail_return_value = 0; int ret; ut8 code[32]; RAnalOp op = {0}; RAnalEsil *esil = core->anal->esil; const char *name = r_reg_get_name (core->anal->reg, R_REG_NAME_PC); if (!esil) { // TODO inititalizeEsil (core); int stacksize = r_config_get_i (core->config, "esil.stack.depth"); int iotrap = r_config_get_i (core->config, "esil.iotrap"); int romem = r_config_get_i (core->config, "esil.romem"); int stats = r_config_get_i (core->config, "esil.stats"); int noNULL = r_config_get_i (core->config, "esil.noNULL"); int verbose = r_config_get_i (core->config, "esil.verbose"); unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size"); if (!(esil = r_anal_esil_new (stacksize, iotrap, addrsize))) { return 0; } r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io core->anal->esil = esil; esil->verbose = verbose; { const char *s = r_config_get (core->config, "cmd.esil.intr"); if (s) { char *my = strdup (s); if (my) { r_config_set (core->config, "cmd.esil.intr", my); free (my); } } } } esil->cmd = r_core_esil_cmd; ut64 addr = r_reg_getv (core->anal->reg, name); r_cons_break_push (NULL, NULL); repeat: if (r_cons_is_breaked ()) { eprintf ("[+] ESIL emulation interrupted at 0x%08" PFMT64x "\n", addr); return_tail (0); } if (!esil) { addr = initializeEsil (core); esil = core->anal->esil; if (!esil) { return_tail (0); } } else { esil->trap = 0; addr = r_reg_getv (core->anal->reg, name); //eprintf ("PC=0x%"PFMT64x"\n", (ut64)addr); } if (prev_addr) { *prev_addr = addr; } if (esil->exectrap) { if (!r_io_is_valid_offset (core->io, addr, R_IO_EXEC)) { esil->trap = R_ANAL_TRAP_EXEC_ERR; esil->trap_code = addr; eprintf 
				("[ESIL] Trap, trying to execute on non-executable memory\n");
			return_tail (1);
		}
	}
	r_asm_set_pc (core->assembler, addr);
	// run esil pin command here
	const char *pincmd = r_anal_pin_call (core->anal, addr);
	if (pincmd) {
		r_core_cmd0 (core, pincmd);
		ut64 pc = r_debug_reg_get (core->dbg, "PC");
		// if the pin command moved PC, stop stepping here
		if (addr != pc) {
			return_tail (1);
		}
	}
	(void)r_io_read_at (core->io, addr, code, sizeof (code));
	// TODO: sometimes this is dupe
	ret = r_anal_op (core->anal, &op, addr, code, sizeof (code), R_ANAL_OP_MASK_ALL);
	// if type is JMP then we execute the next N instructions
	// update the esil pointer because RAnal.op() can change it
	esil = core->anal->esil;
	if (op.size < 1 || ret < 0) {
		// undecodable instruction: notify the todo hook if any
		if (esil->cmd && esil->cmd_todo) {
			esil->cmd (esil, esil->cmd_todo, addr, 0);
		}
		op.size = 1; // avoid inverted stepping
	}
	{
		/* apply hint */
		RAnalHint *hint = r_anal_hint_get (core->anal, addr);
		r_anal_op_hint (&op, hint);
		r_anal_hint_free (hint);
	}
	// advance PC before executing the expression
	r_reg_setv (core->anal->reg, name, addr + op.size);
	if (ret) {
		r_anal_esil_set_pc (esil, addr);
		if (core->dbg->trace->enabled) {
			// temporarily swap in the anal reg profile so the trace
			// records the emulated (not debugger) register state
			RReg *reg = core->dbg->reg;
			core->dbg->reg = core->anal->reg;
			r_debug_trace_pc (core->dbg, addr);
			core->dbg->reg = reg;
		} else {
			r_anal_esil_parse (esil, R_STRBUF_SAFEGET (&op.esil));
			if (core->anal->cur && core->anal->cur->esil_post_loop) {
				core->anal->cur->esil_post_loop (esil, &op);
			}
			r_anal_esil_stack_free (esil);
		}
		// only support 1 slot for now
		if (op.delay) {
			ut8 code2[32];
			ut64 naddr = addr + op.size;
			RAnalOp op2 = {0};
			// emulate only 1 instruction
			r_anal_esil_set_pc (esil, naddr);
			(void)r_io_read_at (core->io, naddr, code2, sizeof (code2));
			// TODO: sometimes this is dupe
			ret = r_anal_op (core->anal, &op2, naddr, code2, sizeof (code2), R_ANAL_OP_MASK_ALL);
			switch (op2.type) {
			case R_ANAL_OP_TYPE_CJMP:
			case R_ANAL_OP_TYPE_JMP:
			case R_ANAL_OP_TYPE_CRET:
			case R_ANAL_OP_TYPE_RET:
				// branches are illegal in a delay slot
				esil->trap = R_ANAL_TRAP_EXEC_ERR;
				esil->trap_code = addr;
				eprintf ("[ESIL] Trap, trying to execute a branch in a delay slot\n");
				return_tail (1);
				break;
			}
			r_anal_esil_parse (esil, R_STRBUF_SAFEGET (&op2.esil));
			r_anal_op_fini (&op2);
		}
		tail_return_value = 1;
	}
	// follow the seek to PC when it drifts beyond dbg.follow bytes
	st64 follow = (st64)r_config_get_i (core->config, "dbg.follow");
	ut64 pc = r_debug_reg_get (core->dbg, "PC");
	if (follow > 0) {
		if ((pc < core->offset) || (pc > (core->offset + follow))) {
			r_core_cmd0 (core, "sr PC");
		}
	}
	// check addr
	if (until_addr != UT64_MAX) {
		if (r_reg_getv (core->anal->reg, name) == until_addr) {
			return_tail (0);
		}
		goto repeat;
	}
	// check esil
	if (esil && esil->trap) {
		if (core->anal->esil->verbose) {
			eprintf ("TRAP\n");
		}
		return_tail (0);
	}
	if (until_expr) {
		if (r_anal_esil_condition (core->anal->esil, until_expr)) {
			if (core->anal->esil->verbose) {
				eprintf ("ESIL BREAK!\n");
			}
			return_tail (0);
		}
		goto repeat;
	}
tail_return:
	r_anal_op_fini (&op);
	r_cons_break_pop ();
	return tail_return_value;
}

/* "Step back" by restoring the most recent saved ESIL session and
 * re-stepping forward from it to just before the current PC. The eprintf
 * NOTEs below are the author's own admission that this is a placeholder
 * implementation. Returns 1 on success, 0 when no session is available. */
R_API int r_core_esil_step_back(RCore *core) {
	RAnalEsil *esil = core->anal->esil;
	RListIter *tail;
	const char *name = r_reg_get_name (core->anal->reg, R_REG_NAME_PC);
	ut64 prev = 0;
	ut64 end = r_reg_getv (core->anal->reg, name);
	if (!esil || !(tail = r_list_tail (esil->sessions))) {
		return 0;
	}
	RAnalEsilSession *before = (RAnalEsilSession *) tail->data;
	if (!before) {
		eprintf ("Cannot find any previous state here\n");
		return 0;
	}
	eprintf ("NOTE: step back in esil is setting an initial state and stepping into pc is the same.\n");
	eprintf ("NOTE: this is extremely wrong and poorly efficient. so don't use this feature unless\n");
	eprintf ("NOTE: you are going to fix it by making it consistent with dts, which is also broken as hell\n");
	eprintf ("Execute until 0x%08"PFMT64x"\n", end);
	r_anal_esil_session_set (esil, before);
	// first pass: find the address of the instruction before `end`
	r_core_esil_step (core, end, NULL, &prev);
	eprintf ("Before 0x%08"PFMT64x"\n", prev);
	r_anal_esil_session_set (esil, before);
	// second pass: stop at that previous instruction
	r_core_esil_step (core, prev, NULL, NULL);
	return 1;
}

/* Classify an address (program/library/exec/read/... bits from
 * r_core_anal_address) and print the flags, as JSON when fmt == 'j'. */
static void cmd_address_info(RCore *core, const char *addrstr, int fmt) {
	ut64 addr, type;
	if (!addrstr || !*addrstr) {
		addr = core->offset;
	} else {
		addr = r_num_math (core->num, addrstr);
	}
	type = r_core_anal_address (core, addr);
	int isp = 0;
	switch (fmt) {
	case 'j':
// emit a comma separator for every field after the first
#define COMMA isp++? ",": ""
		r_cons_printf ("{");
		if (type & R_ANAL_ADDR_TYPE_PROGRAM)
			r_cons_printf ("%s\"program\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_LIBRARY)
			r_cons_printf ("%s\"library\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_EXEC)
			r_cons_printf ("%s\"exec\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_READ)
			r_cons_printf ("%s\"read\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_WRITE)
			r_cons_printf ("%s\"write\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_FLAG)
			r_cons_printf ("%s\"flag\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_FUNC)
			r_cons_printf ("%s\"func\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_STACK)
			r_cons_printf ("%s\"stack\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_HEAP)
			r_cons_printf ("%s\"heap\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_REG)
			r_cons_printf ("%s\"reg\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_ASCII)
			r_cons_printf ("%s\"ascii\":true", COMMA);
		if (type & R_ANAL_ADDR_TYPE_SEQUENCE)
			r_cons_printf ("%s\"sequence\":true", COMMA);
		r_cons_print ("}");
		break;
	default:
		if (type & R_ANAL_ADDR_TYPE_PROGRAM)
			r_cons_printf ("program\n");
		if (type & R_ANAL_ADDR_TYPE_LIBRARY)
			r_cons_printf ("library\n");
		if (type & R_ANAL_ADDR_TYPE_EXEC)
			r_cons_printf ("exec\n");
		if (type & R_ANAL_ADDR_TYPE_READ)
			r_cons_printf ("read\n");
		if (type & R_ANAL_ADDR_TYPE_WRITE)
r_cons_printf ("write\n"); if (type & R_ANAL_ADDR_TYPE_FLAG) r_cons_printf ("flag\n"); if (type & R_ANAL_ADDR_TYPE_FUNC) r_cons_printf ("func\n"); if (type & R_ANAL_ADDR_TYPE_STACK) r_cons_printf ("stack\n"); if (type & R_ANAL_ADDR_TYPE_HEAP) r_cons_printf ("heap\n"); if (type & R_ANAL_ADDR_TYPE_REG) r_cons_printf ("reg\n"); if (type & R_ANAL_ADDR_TYPE_ASCII) r_cons_printf ("ascii\n"); if (type & R_ANAL_ADDR_TYPE_SEQUENCE) r_cons_printf ("sequence\n"); } } static void cmd_anal_info(RCore *core, const char *input) { switch (input[0]) { case '?': eprintf ("Usage: ai @ rsp\n"); break; case ' ': cmd_address_info (core, input, 0); break; case 'j': // "aij" cmd_address_info (core, input + 1, 'j'); break; default: cmd_address_info (core, NULL, 0); break; } } static void initialize_stack (RCore *core, ut64 addr, ut64 size) { const char *mode = r_config_get (core->config, "esil.fillstack"); if (mode && *mode && *mode != '0') { const int bs = 4096 * 32; ut64 i; for (i = 0; i < size; i += bs) { int left = R_MIN (bs, size - i); // r_core_cmdf (core, "wx 10203040 @ 0x%llx", addr); switch (*mode) { case 'd': // "debrujn" r_core_cmdf (core, "wopD %"PFMT64d" @ 0x%"PFMT64x, left, addr + i); break; case 's': // "seq" r_core_cmdf (core, "woe 1 0xff 1 4 @ 0x%"PFMT64x"!0x%"PFMT64x, addr + i, left); break; case 'r': // "random" r_core_cmdf (core, "woR %"PFMT64d" @ 0x%"PFMT64x"!0x%"PFMT64x, left, addr + i, left); break; case 'z': // "zero" case '0': r_core_cmdf (core, "wow 00 @ 0x%"PFMT64x"!0x%"PFMT64x, addr + i, left); break; } } // eprintf ("[*] Initializing ESIL stack with pattern\n"); // r_core_cmdf (core, "woe 0 10 4 @ 0x%"PFMT64x, size, addr); } } static void cmd_esil_mem(RCore *core, const char *input) { RAnalEsil *esil = core->anal->esil; RIOMap *stack_map; ut64 curoff = core->offset; const char *patt = ""; ut64 addr = 0x100000; ut32 size = 0xf0000; char name[128]; RFlagItem *fi; const char *sp, *pc; char uri[32]; char nomalloc[256]; char *p; if (!esil) { int stacksize = 
			r_config_get_i (core->config, "esil.stack.depth");
		int iotrap = r_config_get_i (core->config, "esil.iotrap");
		int romem = r_config_get_i (core->config, "esil.romem");
		int stats = r_config_get_i (core->config, "esil.stats");
		int noNULL = r_config_get_i (core->config, "esil.noNULL");
		int verbose = r_config_get_i (core->config, "esil.verbose");
		unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size");
		// lazily create the ESIL VM when "aeim" runs before "aei"
		if (!(esil = r_anal_esil_new (stacksize, iotrap, addrsize))) {
			return;
		}
		r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io
		core->anal->esil = esil;
		esil->verbose = verbose;
		{
			// re-apply the user's interrupt-handler command after setup
			const char *s = r_config_get (core->config, "cmd.esil.intr");
			if (s) {
				char *my = strdup (s);
				if (my) {
					r_config_set (core->config, "cmd.esil.intr", my);
					free (my);
				}
			}
		}
	}
	if (*input == '?') {
		eprintf ("Usage: aeim [addr] [size] [name] - initialize ESIL VM stack\n");
		eprintf ("Default: 0x100000 0xf0000\n");
		eprintf ("See ae? for more help\n");
		return;
	}
	if (input[0] == 'p') {
		// "aeimp": re-fill the existing stack with the configured pattern
		fi = r_flag_get (core->flags, "aeim.stack");
		if (fi) {
			addr = fi->offset;
			size = fi->size;
		} else {
			cmd_esil_mem (core, "");
		}
		if (esil) {
			esil->stack_addr = addr;
			esil->stack_size = size;
		}
		initialize_stack (core, addr, size);
		return;
	}
	if (!*input) {
		RFlagItem *fi = r_flag_get (core->flags, "aeim.fd");
		if (fi) {
			// Close the fd associated with the aeim stack
			(void)r_io_fd_close (core->io, fi->offset);
		}
	}
	addr = r_config_get_i (core->config, "esil.stack.addr");
	size = r_config_get_i (core->config, "esil.stack.size");
	patt = r_config_get (core->config, "esil.stack.pattern");
	// parse optional "addr size name" arguments from a bounded local copy
	p = strncpy (nomalloc, input, 255);
	if ((p = strchr (p, ' '))) {
		while (*p == ' ') p++;
		addr = r_num_math (core->num, p);
		if ((p = strchr (p, ' '))) {
			while (*p == ' ') p++;
			size = (ut32)r_num_math (core->num, p);
			if (size < 1) {
				size = 0xf0000;
			}
			if ((p = strchr (p, ' '))) {
				while (*p == ' ') p++;
				snprintf (name, sizeof (name), "mem.%s", p);
			} else {
				snprintf (name, sizeof (name), "mem.0x%" PFMT64x "_0x%x", addr, size);
			}
		} else {
			snprintf (name, sizeof (name), "mem.0x%" PFMT64x "_0x%x", addr, size);
		}
	} else {
		snprintf (name, sizeof (name), "mem.0x%" PFMT64x "_0x%x", addr, size);
	}
	if (*input == '-') {
		// "aeim-": deinitialize the stack fd and its flag
		if (esil->stack_fd > 2) { //0, 1, 2 are reserved for stdio/stderr
			r_io_fd_close (core->io, esil->stack_fd);
			// no need to kill the maps, r_io_map_cleanup does that for us in the close
			esil->stack_fd = 0;
		} else {
			eprintf ("Cannot deinitialize %s\n", name);
		}
		r_flag_unset_name (core->flags, name);
		// eprintf ("Deinitialized %s\n", name);
		return;
	}
	snprintf (uri, sizeof (uri), "malloc://%d", (int)size);
	esil->stack_fd = r_io_fd_open (core->io, uri, R_IO_RW, 0);
	if (!(stack_map = r_io_map_add (core->io, esil->stack_fd, R_IO_RW, 0LL, addr, size, true))) {
		r_io_fd_close (core->io, esil->stack_fd);
		eprintf ("Cannot create map for tha stack, fd %d got closed again\n", esil->stack_fd);
		esil->stack_fd = 0;
		return;
	}
	r_io_map_set_name (stack_map, name);
	// r_flag_set (core->flags, name, addr, size); //why is this here?
char *pc = r_reg_get_name (core->anal->reg, R_REG_NAME_PC); r_reg_arena_poke (core->anal->reg, regstate); r_reg_setv (core->anal->reg, pc, opc); R_FREE (regstate); } #endif typedef struct { RList *regs; RList *regread; RList *regwrite; RList *inputregs; } AeaStats; static void aea_stats_init (AeaStats *stats) { stats->regs = r_list_newf (free); stats->regread = r_list_newf (free); stats->regwrite = r_list_newf (free); stats->inputregs = r_list_newf (free); } static void aea_stats_fini (AeaStats *stats) { R_FREE (stats->regs); R_FREE (stats->regread); R_FREE (stats->regwrite); R_FREE (stats->inputregs); } static bool contains(RList *list, const char *name) { RListIter *iter; const char *n; r_list_foreach (list, iter, n) { if (!strcmp (name, n)) return true; } return false; } static char *oldregread = NULL; static RList *mymemxsr = NULL; static RList *mymemxsw = NULL; #define R_NEW_DUP(x) memcpy((void*)malloc(sizeof(x)), &(x), sizeof(x)) typedef struct { ut64 addr; int size; } AeaMemItem; static int mymemwrite(RAnalEsil *esil, ut64 addr, const ut8 *buf, int len) { RListIter *iter; AeaMemItem *n; r_list_foreach (mymemxsw, iter, n) { if (addr == n->addr) { return len; } } if (!r_io_is_valid_offset (esil->anal->iob.io, addr, 0)) { return false; } n = R_NEW (AeaMemItem); if (n) { n->addr = addr; n->size = len; r_list_push (mymemxsw, n); } return len; } static int mymemread(RAnalEsil *esil, ut64 addr, ut8 *buf, int len) { RListIter *iter; AeaMemItem *n; r_list_foreach (mymemxsr, iter, n) { if (addr == n->addr) { return len; } } if (!r_io_is_valid_offset (esil->anal->iob.io, addr, 0)) { return false; } n = R_NEW (AeaMemItem); if (n) { n->addr = addr; n->size = len; r_list_push (mymemxsr, n); } return len; } static int myregwrite(RAnalEsil *esil, const char *name, ut64 *val) { AeaStats *stats = esil->user; if (oldregread && !strcmp (name, oldregread)) { r_list_pop (stats->regread); R_FREE (oldregread) } if (!IS_DIGIT (*name)) { if (!contains (stats->regs, name)) { 
r_list_push (stats->regs, strdup (name)); } if (!contains (stats->regwrite, name)) { r_list_push (stats->regwrite, strdup (name)); } } return 0; } static int myregread(RAnalEsil *esil, const char *name, ut64 *val, int *len) { AeaStats *stats = esil->user; if (!IS_DIGIT (*name)) { if (!contains (stats->inputregs, name)) { if (!contains (stats->regwrite, name)) { r_list_push (stats->inputregs, strdup (name)); } } if (!contains (stats->regs, name)) { r_list_push (stats->regs, strdup (name)); } if (!contains (stats->regread, name)) { r_list_push (stats->regread, strdup (name)); } } return 0; } static void showregs (RList *list) { if (!r_list_empty (list)) { char *reg; RListIter *iter; r_list_foreach (list, iter, reg) { r_cons_print (reg); if (iter->n) { r_cons_printf (" "); } } } r_cons_newline(); } static void showregs_json (RList *list) { r_cons_printf ("["); if (!r_list_empty (list)) { char *reg; RListIter *iter; r_list_foreach (list, iter, reg) { r_cons_printf ("\"%s\"", reg); if (iter->n) { r_cons_printf (","); } } } r_cons_printf ("]"); } static bool cmd_aea(RCore* core, int mode, ut64 addr, int length) { RAnalEsil *esil; int ptr, ops, ops_end = 0, len, buf_sz, maxopsize; ut64 addr_end; AeaStats stats; const char *esilstr; RAnalOp aop = R_EMPTY; ut8 *buf; RList* regnow; if (!core) { return false; } maxopsize = r_anal_archinfo (core->anal, R_ANAL_ARCHINFO_MAX_OP_SIZE); if (maxopsize < 1) { maxopsize = 16; } if (mode & 1) { // number of bytes / length buf_sz = length; } else { // number of instructions / opcodes ops_end = length; if (ops_end < 1) { ops_end = 1; } buf_sz = ops_end * maxopsize; } if (buf_sz < 1) { buf_sz = maxopsize; } addr_end = addr + buf_sz; buf = malloc (buf_sz); if (!buf) { return false; } (void)r_io_read_at (core->io, addr, (ut8 *)buf, buf_sz); aea_stats_init (&stats); //esil_init (core); //esil = core->anal->esil; r_reg_arena_push (core->anal->reg); int stacksize = r_config_get_i (core->config, "esil.stack.depth"); bool iotrap = r_config_get_i 
		(core->config, "esil.iotrap");
	int romem = r_config_get_i (core->config, "esil.romem");
	int stats1 = r_config_get_i (core->config, "esil.stats");
	int noNULL = r_config_get_i (core->config, "esil.noNULL");
	unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size");
	esil = r_anal_esil_new (stacksize, iotrap, addrsize);
	r_anal_esil_setup (esil, core->anal, romem, stats1, noNULL); // setup io
// loop guard: byte budget (mode bit0) or opcode budget
# define hasNext(x) (x&1) ? (addr<addr_end) : (ops<ops_end)
	mymemxsr = r_list_new ();
	mymemxsw = r_list_new ();
	esil->user = &stats;
	esil->cb.hook_reg_write = myregwrite;
	esil->cb.hook_reg_read = myregread;
	esil->cb.hook_mem_write = mymemwrite;
	esil->cb.hook_mem_read = mymemread;
	esil->nowrite = true;
	for (ops = ptr = 0; ptr < buf_sz && hasNext (mode); ops++, ptr += len) {
		len = r_anal_op (core->anal, &aop, addr + ptr, buf + ptr, buf_sz - ptr, R_ANAL_OP_MASK_ALL);
		esilstr = R_STRBUF_SAFEGET (&aop.esil);
		if (len < 1) {
			eprintf ("Invalid 0x%08"PFMT64x" instruction %02x %02x\n", addr + ptr, buf[ptr], buf[ptr + 1]);
			break;
		}
		r_anal_esil_parse (esil, esilstr);
		r_anal_esil_stack_free (esil);
	}
	esil->nowrite = false;
	esil->cb.hook_reg_write = NULL;
	esil->cb.hook_reg_read = NULL;
	//esil_fini (core);
	r_anal_esil_free (esil);
	r_reg_arena_pop (core->anal->reg);
	// regnow: registers written during emulation but never read
	regnow = r_list_newf (free);
	{
		RListIter *iter;
		char *reg;
		r_list_foreach (stats.regs, iter, reg) {
			if (!contains (stats.regwrite, reg)) {
				r_list_push (regnow, strdup (reg));
			}
		}
	}
	if ((mode >> 5) & 1) {
		RListIter *iter;
		AeaMemItem *n;
		int c = 0;
		r_cons_printf ("f-mem.*\n");
		r_list_foreach (mymemxsr, iter, n) {
			r_cons_printf ("f mem.read.%d 0x%08x @ 0x%08"PFMT64x"\n", c++, n->size, n->addr);
		}
		c = 0;
		r_list_foreach (mymemxsw, iter, n) {
			r_cons_printf ("f mem.write.%d 0x%08x @ 0x%08"PFMT64x"\n", c++, n->size, n->addr);
		}
	}
	/* show registers used */
	if ((mode >> 1) & 1) {
		showregs (stats.regread);
	} else if ((mode >> 2) & 1) {
		showregs (stats.regwrite);
	} else if ((mode >> 3) & 1) {
		showregs (regnow);
	} else if ((mode >> 4) & 1) {
		r_cons_printf ("{\"A\":");
		showregs_json (stats.regs);
		r_cons_printf (",\"I\":");
		showregs_json (stats.inputregs);
		r_cons_printf (",\"R\":");
		showregs_json (stats.regread);
		r_cons_printf (",\"W\":");
		showregs_json (stats.regwrite);
		r_cons_printf (",\"N\":");
		showregs_json (regnow);
		r_cons_printf ("}");
		r_cons_newline();
	} else if ((mode >> 5) & 1) {
		// nothing
	} else {
		r_cons_printf (" I: ");
		showregs (stats.inputregs);
		r_cons_printf (" A: ");
		showregs (stats.regs);
		r_cons_printf (" R: ");
		showregs (stats.regread);
		r_cons_printf (" W: ");
		showregs (stats.regwrite);
		r_cons_printf ("NW: ");
		if (r_list_length (regnow)) {
			showregs (regnow);
		} else {
			r_cons_newline();
		}
		RListIter *iter;
		// NOTE(review): items are AeaMemItem*, iterated here as ut64*;
		// *n prints the addr field only because it is the first member.
		ut64 *n;
		if (!r_list_empty (mymemxsr)) {
			r_cons_printf ("@R:");
			r_list_foreach (mymemxsr, iter, n) {
				r_cons_printf (" 0x%08"PFMT64x, *n);
			}
			r_cons_newline ();
		}
		if (!r_list_empty (mymemxsw)) {
			r_cons_printf ("@W:");
			r_list_foreach (mymemxsw, iter, n) {
				r_cons_printf (" 0x%08"PFMT64x, *n);
			}
			r_cons_newline ();
		}
	}
	r_list_free (mymemxsr);
	r_list_free (mymemxsw);
	mymemxsr = NULL;
	mymemxsw = NULL;
	aea_stats_fini (&stats);
	free (buf);
	R_FREE (regnow);
	return true;
}

/* "aesp"/"aes <n>" helper: decode `off` instructions starting at `addr`
 * (or at PC when addr == -1), then reset PC to the start and emulate up
 * to the accumulated instruction size with r_core_esil_step. */
static void cmd_aespc(RCore *core, ut64 addr, int off) {
	RAnalEsil *esil = core->anal->esil;
	int i, j = 0;
	int instr_size = 0;
	ut8 *buf;
	RAnalOp aop = {0};
	int ret , bsize = R_MAX (64, core->blocksize);
	const int mininstrsz = r_anal_archinfo (core->anal, R_ANAL_ARCHINFO_MIN_OP_SIZE);
	const int minopcode = R_MAX (1, mininstrsz);
	const char *pc = r_reg_get_name (core->dbg->reg, R_REG_NAME_PC);
	RRegItem *r = r_reg_get (core->dbg->reg, pc, -1);
	int stacksize = r_config_get_i (core->config, "esil.stack.depth");
	int iotrap = r_config_get_i (core->config, "esil.iotrap");
	unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size");
	if (!esil) {
		// NOTE(review): this locally-created esil is never assigned to
		// core->anal->esil nor freed or used below — looks like a leak;
		// r_core_esil_step creates its own VM when needed. Verify intent.
		if (!(esil = r_anal_esil_new (stacksize, iotrap, addrsize))) {
			return;
		}
	}
	buf = malloc (bsize);
	if (!buf) {
		eprintf ("Cannot allocate %d byte(s)\n", bsize);
		free (buf);
		return;
} if (addr == -1) { addr = r_debug_reg_get (core->dbg, pc); } ut64 curpc = addr; ut64 oldoff = core->offset; for (i = 0, j = 0; j < off ; i++, j++) { if (r_cons_is_breaked ()) { break; } if (i >= (bsize - 32)) { i = 0; } if (!i) { r_core_read_at (core, addr, buf, bsize); } ret = r_anal_op (core->anal, &aop, addr, buf + i, bsize - i, R_ANAL_OP_MASK_ALL); instr_size += ret; int inc = (core->search->align > 0)? core->search->align - 1: ret - 1; if (inc < 0) { inc = minopcode; } i += inc; addr += inc; r_anal_op_fini (&aop); } r_reg_set_value (core->dbg->reg, r, curpc); r_core_esil_step (core, curpc + instr_size, NULL, NULL); r_core_seek (core, oldoff, 1); } static void cmd_anal_esil(RCore *core, const char *input) { RAnalEsil *esil = core->anal->esil; ut64 addr = core->offset; ut64 adr ; char *n, *n1; int off; int stacksize = r_config_get_i (core->config, "esil.stack.depth"); int iotrap = r_config_get_i (core->config, "esil.iotrap"); int romem = r_config_get_i (core->config, "esil.romem"); int stats = r_config_get_i (core->config, "esil.stats"); int noNULL = r_config_get_i (core->config, "esil.noNULL"); ut64 until_addr = UT64_MAX; unsigned int addrsize = r_config_get_i (core->config, "esil.addr.size"); const char *until_expr = NULL; RAnalOp *op; switch (input[0]) { case 'p': // "aep" switch (input[1]) { case 'c': if (input[2] == ' ') { // seek to this address r_core_cmdf (core, "ar PC=%s", input + 3); r_core_cmd0 (core, ".ar*"); } else { eprintf ("Missing argument\n"); } break; case 0: r_anal_pin_list (core->anal); break; case '-': if (input[2]) addr = r_num_math (core->num, input + 2); r_anal_pin_unset (core->anal, addr); break; case ' ': r_anal_pin (core->anal, addr, input + 2); break; default: r_core_cmd_help (core, help_msg_aep); break; } break; case 'r': // "aer" // 'aer' is an alias for 'ar' cmd_anal_reg (core, input + 1); break; case '*': // XXX: this is wip, not working atm if (core->anal->esil) { r_cons_printf ("trap: %d\n", core->anal->esil->trap); 
r_cons_printf ("trap-code: %d\n", core->anal->esil->trap_code); } else { eprintf ("esil vm not initialized. run `aei`\n"); } break; case ' ': //r_anal_esil_eval (core->anal, input+1); if (!esil) { if (!(core->anal->esil = esil = r_anal_esil_new (stacksize, iotrap, addrsize))) return; } r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io r_anal_esil_set_pc (esil, core->offset); r_anal_esil_parse (esil, input + 1); r_anal_esil_dumpstack (esil); r_anal_esil_stack_free (esil); break; case 's': // "aes" // "aes" "aeso" "aesu" "aesue" // aes -> single step // aesb -> single step back // aeso -> single step over // aesu -> until address // aesue -> until esil expression switch (input[1]) { case '?': eprintf ("See: ae?~aes\n"); break; case 'l': // "aesl" { ut64 pc = r_debug_reg_get (core->dbg, "PC"); RAnalOp *op = r_core_anal_op (core, pc); // TODO: honor hint if (!op) { break; } r_core_esil_step (core, UT64_MAX, NULL, NULL); r_debug_reg_set (core->dbg, "PC", pc + op->size); r_anal_esil_set_pc (esil, pc + op->size); r_core_cmd0 (core, ".ar*"); } break; case 'b': // "aesb" if (!r_core_esil_step_back (core)) { eprintf ("cannnot step back\n"); } r_core_cmd0 (core, ".ar*"); break; case 'u': // "aesu" if (input[2] == 'e') { until_expr = input + 3; } else { until_addr = r_num_math (core->num, input + 2); } r_core_esil_step (core, until_addr, until_expr, NULL); r_core_cmd0 (core, ".ar*"); break; case 'o': // "aeso" // step over op = r_core_anal_op (core, r_reg_getv (core->anal->reg, r_reg_get_name (core->anal->reg, R_REG_NAME_PC))); if (op && op->type == R_ANAL_OP_TYPE_CALL) { until_addr = op->addr + op->size; } r_core_esil_step (core, until_addr, until_expr, NULL); r_anal_op_free (op); r_core_cmd0 (core, ".ar*"); break; case 'p': //"aesp" n = strchr (input, ' '); n1 = n ? 
strchr (n + 1, ' ') : NULL; if ((!n || !n1) || (!(n + 1) || !(n1 + 1))) { eprintf ("aesp [offset] [num]\n"); break; } adr = r_num_math (core->num, n + 1); off = r_num_math (core->num, n1 + 1); cmd_aespc (core, adr, off); break; case ' ': n = strchr (input, ' '); if (!(n + 1)) { r_core_esil_step (core, until_addr, until_expr, NULL); break; } off = r_num_math (core->num, n + 1); cmd_aespc (core, -1, off); break; default: r_core_esil_step (core, until_addr, until_expr, NULL); r_core_cmd0 (core, ".ar*"); break; } break; case 'c': // "aec" if (input[1] == '?') { // "aec?" r_core_cmd_help (core, help_msg_aec); } else if (input[1] == 's') { // "aecs" const char *pc = r_reg_get_name (core->anal->reg, R_REG_NAME_PC); ut64 newaddr; int ret; for (;;) { op = r_core_anal_op (core, addr); if (!op) { break; } if (op->type == R_ANAL_OP_TYPE_SWI) { eprintf ("syscall at 0x%08" PFMT64x "\n", addr); break; } if (op->type == R_ANAL_OP_TYPE_TRAP) { eprintf ("trap at 0x%08" PFMT64x "\n", addr); break; } ret = r_core_esil_step (core, UT64_MAX, NULL, NULL); r_anal_op_free (op); op = NULL; if (core->anal->esil->trap || core->anal->esil->trap_code) { break; } if (!ret) break; r_core_cmd0 (core, ".ar*"); newaddr = r_num_get (core->num, pc); if (addr == newaddr) { addr++; break; } else { addr = newaddr; } } if (op) { r_anal_op_free (op); } } else { // "aec" -> continue until ^C // "aecu" -> until address // "aecue" -> until esil expression if (input[1] == 'u' && input[2] == 'e') until_expr = input + 3; else if (input[1] == 'u') until_addr = r_num_math (core->num, input + 2); else until_expr = "0"; r_core_esil_step (core, until_addr, until_expr, NULL); r_core_cmd0 (core, ".ar*"); } break; case 'i': // "aei" switch (input[1]) { case 's': case 'm': // "aeim" cmd_esil_mem (core, input + 2); break; case 'p': // initialize pc = $$ r_core_cmd0 (core, "ar PC=$$"); break; case '?': cmd_esil_mem (core, "?"); break; case '-': if (esil) { sdb_reset (esil->stats); } r_anal_esil_free (esil); 
core->anal->esil = NULL; break; case 0: //lolololol r_anal_esil_free (esil); // reinitialize { const char *pc = r_reg_get_name (core->anal->reg, R_REG_NAME_PC); if (r_reg_getv (core->anal->reg, pc) == 0LL) { r_core_cmd0 (core, "ar PC=$$"); } } if (!(esil = core->anal->esil = r_anal_esil_new (stacksize, iotrap, addrsize))) { return; } r_anal_esil_setup (esil, core->anal, romem, stats, noNULL); // setup io esil->verbose = (int)r_config_get_i (core->config, "esil.verbose"); /* restore user settings for interrupt handling */ { const char *s = r_config_get (core->config, "cmd.esil.intr"); if (s) { char *my = strdup (s); if (my) { r_config_set (core->config, "cmd.esil.intr", my); free (my); } } } break; } break; case 'k': // "aek" switch (input[1]) { case '\0': input = "123*"; /* fall through */ case ' ': if (esil && esil->stats) { char *out = sdb_querys (esil->stats, NULL, 0, input + 2); if (out) { r_cons_println (out); free (out); } } else { eprintf ("esil.stats is empty. Run 'aei'\n"); } break; case '-': if (esil) { sdb_reset (esil->stats); } break; } break; case 'f': // "aef" { RListIter *iter; RAnalBlock *bb; RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, R_ANAL_FCN_TYPE_FCN | R_ANAL_FCN_TYPE_SYM); if (fcn) { // emulate every instruction in the function recursively across all the basic blocks r_list_foreach (fcn->bbs, iter, bb) { ut64 pc = bb->addr; ut64 end = bb->addr + bb->size; RAnalOp op; ut8 *buf; int ret, bbs = end - pc; if (bbs < 1 || bbs > 0xfffff) { eprintf ("Invalid block size\n"); } // eprintf ("[*] Emulating 0x%08"PFMT64x" basic block 0x%08" PFMT64x " - 0x%08" PFMT64x "\r[", fcn->addr, pc, end); buf = calloc (1, bbs + 1); r_io_read_at (core->io, pc, buf, bbs); int left; while (pc < end) { left = R_MIN (end - pc, 32); r_asm_set_pc (core->assembler, pc); ret = r_anal_op (core->anal, &op, addr, buf, left, R_ANAL_OP_MASK_ALL); // read overflow if (ret) { r_reg_set_value_by_role (core->anal->reg, R_REG_NAME_PC, pc); r_anal_esil_parse (esil, 
R_STRBUF_SAFEGET (&op.esil)); r_anal_esil_dumpstack (esil); r_anal_esil_stack_free (esil); pc += op.size; } else { pc += 4; // XXX } } } } else { eprintf ("Cannot find function at 0x%08" PFMT64x "\n", core->offset); } } break; case 't': // "aet" switch (input[1]) { case 'r': // "aetr" { // anal ESIL to REIL. RAnalEsil *esil = r_anal_esil_new (stacksize, iotrap, addrsize); if (!esil) { return; } r_anal_esil_to_reil_setup (esil, core->anal, romem, stats); r_anal_esil_set_pc (esil, core->offset); r_anal_esil_parse (esil, input + 2); r_anal_esil_dumpstack (esil); r_anal_esil_free (esil); break; } case 's': // "aets" switch (input[2]) { case 0: r_anal_esil_session_list (esil); break; case '+': r_anal_esil_session_add (esil); break; default: r_core_cmd_help (core, help_msg_aets); break; } break; default: eprintf ("Unknown command. Use `aetr`.\n"); break; } break; case 'A': // "aeA" if (input[1] == '?') { r_core_cmd_help (core, help_msg_aea); } else if (input[1] == 'r') { cmd_aea (core, 1 + (1<<1), core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'w') { cmd_aea (core, 1 + (1<<2), core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'n') { cmd_aea (core, 1 + (1<<3), core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'j') { cmd_aea (core, 1 + (1<<4), core->offset, r_num_math (core->num, input+2)); } else if (input[1] == '*') { cmd_aea (core, 1 + (1<<5), core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'f') { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1); if (fcn) { cmd_aea (core, 1, fcn->addr, r_anal_fcn_size (fcn)); } } else { cmd_aea (core, 1, core->offset, (int)r_num_math (core->num, input+2)); } break; case 'a': // "aea" if (input[1] == '?') { r_core_cmd_help (core, help_msg_aea); } else if (input[1] == 'r') { cmd_aea (core, 1<<1, core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'w') { cmd_aea (core, 1<<2, core->offset, r_num_math (core->num, 
input+2)); } else if (input[1] == 'n') { cmd_aea (core, 1<<3, core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'j') { cmd_aea (core, 1<<4, core->offset, r_num_math (core->num, input+2)); } else if (input[1] == '*') { cmd_aea (core, 1<<5, core->offset, r_num_math (core->num, input+2)); } else if (input[1] == 'f') { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1); // "aeafj" if (fcn) { switch (input[2]) { case 'j': // "aeafj" cmd_aea (core, 1<<4, fcn->addr, r_anal_fcn_size (fcn)); break; default: cmd_aea (core, 1, fcn->addr, r_anal_fcn_size (fcn)); break; } break; } } else { const char *arg = input[1]? input + 2: ""; ut64 len = r_num_math (core->num, arg); cmd_aea (core, 0, core->offset, len); } break; case 'x': { // "aex" char *hex; int ret, bufsz; input = r_str_trim_ro (input + 1); hex = strdup (input); if (!hex) { break; } RAnalOp aop = R_EMPTY; bufsz = r_hex_str2bin (hex, (ut8*)hex); ret = r_anal_op (core->anal, &aop, core->offset, (const ut8*)hex, bufsz, R_ANAL_OP_MASK_ALL); if (ret>0) { const char *str = R_STRBUF_SAFEGET (&aop.esil); char *str2 = r_str_newf (" %s", str); cmd_anal_esil (core, str2); free (str2); } r_anal_op_fini (&aop); break; } case '?': // "ae?" 
/* --- tail of the "ae" (ESIL) command dispatcher --- */
if (input[1] == '?') {
	r_core_cmd_help (core, help_detail_ae);
	break;
}
/* fallthrough */
default:
	r_core_cmd_help (core, help_msg_ae);
	break;
}
}

/* "ab" — run core_anal_bytes() over `len` bytes of the current block.
 * If the requested length exceeds the current blocksize the block is
 * grown temporarily and restored before returning. */
static void cmd_anal_bytes(RCore *core, const char *input) {
	int len = core->blocksize;
	int tbs = len; /* saved blocksize, restored on exit */
	if (input[0]) {
		len = (int)r_num_get (core->num, input + 1);
		if (len > tbs) {
			r_core_block_size (core, len);
		}
	}
	core_anal_bytes (core, core->block, len, 0, input[0]);
	if (tbs != core->blocksize) {
		r_core_block_size (core, tbs);
	}
}

/* "ao[sjer*]" — print analysis info for opcodes at the current offset;
 * the mode character is forwarded to core_anal_bytes() as the output
 * format ('j' json, 'e' esil, ...). */
static void cmd_anal_opcode(RCore *core, const char *input) {
	int l, len = core->blocksize;
	ut32 tbs = core->blocksize;
	switch (input[0]) {
	case '?':
		r_core_cmd_help (core, help_msg_ao);
		break;
	case 's': // "aos"
	case 'j': // "aoj"
	case 'e': // "aoe"
	case 'r': {
		int count = 1;
		if (input[1] && input[2]) {
			l = (int)r_num_get (core->num, input + 1);
			if (l > 0) {
				count = l;
			}
			if (l > tbs) {
				r_core_block_size (core, l * 4);
				// len = l;
			}
		} else {
			len = l = core->blocksize;
			count = 1;
		}
		/* NOTE(review): when an argument is given, `len` keeps the old
		 * blocksize even after the block was grown above — the
		 * commented-out `len = l` suggests this may be unintended;
		 * confirm. */
		core_anal_bytes (core, core->block, len, count, input[0]);
	} break;
	case '*':
		r_core_anal_hint_list (core->anal, input[0]);
		break;
	default: {
		int count = 0;
		/* NOTE(review): if the argument parses to a value <= 0, `count`
		 * stays 0 here, unlike the explicit count = 1 fallback below —
		 * confirm that a zero count is intended for bad input. */
		if (input[0]) {
			l = (int)r_num_get (core->num, input + 1);
			if (l > 0) {
				count = l;
			}
			if (l > tbs) {
				r_core_block_size (core, l * 4);
				//len = l;
			}
		} else {
			len = l = core->blocksize;
			count = 1;
		}
		core_anal_bytes (core, core->block, len, count, 0);
		break;
	}
	}
}

/* "aj" — define a function at every code-jump reference target. */
static void cmd_anal_jumps(RCore *core, const char *input) {
	r_core_cmdf (core, "af @@= `ax~ref.code.jmp[1]`");
}

// TODO: cleanup to reuse code
/* Scan a range for runs of traps/nops that separate functions and print
 * "af" commands for the first real instruction after each run. */
static void cmd_anal_aftertraps(RCore *core, const char *input) {
	int bufi, minop = 1; // 4
	ut8 *buf;
	RBinFile *binfile;
	RAnalOp op;
	ut64 addr, addr_end;
	ut64 len = r_num_math (core->num, input);
	if (len > 0xffffff) {
		eprintf ("Too big\n");
		return;
	}
	binfile = r_core_bin_cur (core);
	if (!binfile) {
		eprintf ("cur binfile NULL\n");
		return;
	}
	addr = core->offset;
	if (!len) { // ignore search.in to avoid problems.
/* analysis != search  (tail of the comment opened at the end of the
 * previous line: section lookup here deliberately ignores search.in) */
	RIOSection *sec = r_io_section_vget (core->io, addr);
	if (sec && sec->flags & 1) { // search in current section
		if (sec->size > binfile->size) {
			addr = sec->vaddr;
			if (binfile->size > sec->paddr) {
				len = binfile->size - sec->paddr;
			} else {
				eprintf ("Opps something went wrong aac\n");
				return;
			}
		} else {
			addr = sec->vaddr;
			len = sec->size;
		}
	} else {
		/* no executable section: derive the scan length from the file
		 * size relative to the current offset */
		if (sec && sec->vaddr != sec->paddr && binfile->size > (core->offset - sec->vaddr + sec->paddr)) {
			len = binfile->size - (core->offset - sec->vaddr + sec->paddr);
		} else {
			if (binfile->size > core->offset) {
				len = binfile->size - core->offset;
			} else {
				eprintf ("Oops invalid range\n");
				len = 0;
			}
		}
	}
	} /* end if (!len) */
	addr_end = addr + len;
	if (!(buf = malloc (4096))) {
		return;
	}
	bufi = 0;
	int trapcount = 0;
	int nopcount = 0;
	r_cons_break_push (NULL, NULL);
	while (addr < addr_end) {
		if (r_cons_is_breaked ()) {
			break;
		}
		// TODO: too many ioreads here
		if (bufi > 4000) {
			bufi = 0;
		}
		if (!bufi) {
			r_io_read_at (core->io, addr, buf, 4096);
		}
		if (r_anal_op (core->anal, &op, addr, buf + bufi, 4096 - bufi, R_ANAL_OP_MASK_ALL)) {
			if (op.size < 1) { // XXX must be +4 on arm/mips/.. like we do in disasm.c
				op.size = minop;
			}
			if (op.type == R_ANAL_OP_TYPE_TRAP) {
				trapcount ++;
			} else if (op.type == R_ANAL_OP_TYPE_NOP) {
				nopcount ++;
			} else {
				/* real instruction after a run of nops/traps: emit an
				 * "af" command for this likely function start */
				if (nopcount > 1) {
					r_cons_printf ("af @ 0x%08"PFMT64x"\n", addr);
					nopcount = 0;
				}
				if (trapcount > 0) {
					r_cons_printf ("af @ 0x%08"PFMT64x"\n", addr);
					trapcount = 0;
				}
			}
		} else {
			op.size = minop;
		}
		addr += (op.size > 0)? op.size : 1;
		bufi += (op.size > 0)? op.size : 1;
		r_anal_op_fini (&op);
	}
	r_cons_break_pop ();
	free (buf);
}

/* "abb"-driver: run basic-block analysis over every executable boundary
 * (or over an explicit size given as argument). */
static void cmd_anal_blocks(RCore *core, const char *input) {
	ut64 from , to;
	char *arg = strchr (input, ' ');
	r_cons_break_push (NULL, NULL);
#if 0
	ls_foreach (core->io->sections, iter, s) {
		/* is executable */
		if (!(s->flags & R_IO_EXEC)) {
			continue;
		}
		min = s->vaddr;
		max = s->vaddr + s->vsize;
		r_core_cmdf (core, "abb%s 0x%08"PFMT64x" @ 0x%08"PFMT64x, input, (max - min), min);
		if (r_cons_is_breaked ()) {
			goto ctrl_c;
		}
	}
	if (ls_empty (core->io->sections)) {
		min = core->offset;
		max = 0xffff + min;
		r_core_cmdf (core, "abb%s 0x%08"PFMT64x" @ 0x%08"PFMT64x, input, (max - min), min);
		if (r_cons_is_breaked ()) {
			goto ctrl_c;
		}
	}
#endif
	if (!arg) {
		RList *list = r_core_get_boundaries_prot (core, R_IO_EXEC, NULL, "anal");
		RListIter *iter;
		RIOMap* map;
		r_list_foreach (list, iter, map) {
			from = map->itv.addr;
			to = r_itv_end (map->itv);
			if (r_cons_is_breaked ()) {
				goto ctrl_c;
			}
			if (!from && !to) {
				eprintf ("Cannot determine search boundaries\n");
			} else if (to - from > UT32_MAX) {
				eprintf ("Skipping huge range\n");
			} else {
				r_core_cmdf (core, "abb 0x%08"PFMT64x" @ 0x%08"PFMT64x, (to - from), from);
			}
		}
	} else {
		/* NOTE(review): `sz` is an int but the format uses PFMT64x —
		 * presumably a 64-bit specifier; verify the vararg width
		 * matches. */
		int sz = r_num_math (core->num, arg + 1);
		r_core_cmdf (core, "abb 0x%08"PFMT64x" @ 0x%08"PFMT64x, sz, core->offset);
	}
ctrl_c:
	r_cons_break_pop ();
}

/* Walk [addr, addr_end) decoding instructions; for every CALL found,
 * record an xref and recursively analyze the callee. */
static void _anal_calls(RCore *core, ut64 addr, ut64 addr_end) {
	RAnalOp op;
	int bufi;
	int depth = r_config_get_i (core->config, "anal.depth");
	const int addrbytes = core->io->addrbytes;
	const int bsz = 4096;
	ut8 *buf;
	ut8 *block;
	bufi = 0;
	if (addr_end - addr > UT32_MAX) {
		return;
	}
	buf = malloc (bsz);
	block = malloc (bsz);
	if (!buf || !block) {
		eprintf ("Error: cannot allocate buf or block\n");
		free (buf);
		free (block);
		return;
	}
	int minop = r_anal_archinfo (core->anal, R_ANAL_ARCHINFO_MIN_OP_SIZE);
	if (minop < 1) {
		minop = 1;
	}
	while (addr < addr_end) {
		if (r_cons_is_breaked ()) {
			break;
		}
		// TODO: too many ioreads here
		if (bufi > 4000) { bufi =
0; } /* completes the `bufi = 0` reset begun at the end of the previous line */
		if (!bufi) {
			r_io_read_at (core->io, addr, buf, bsz);
		}
		/* skip blocks that are entirely 0xff or entirely 0x00 — these
		 * are typically unmapped/uninitialized data */
		memset (block, -1, bsz);
		if (!memcmp (buf, block, bsz)) {
			//eprintf ("Error: skipping uninitialized block \n");
			addr += bsz;
			continue;
		}
		memset (block, 0, bsz);
		if (!memcmp (buf, block, bsz)) {
			//eprintf ("Error: skipping uninitialized block \n");
			addr += bsz;
			continue;
		}
		if (r_anal_op (core->anal, &op, addr, buf + bufi, bsz - bufi, 0) > 0) {
			if (op.size < 1) {
				op.size = minop;
			}
			if (op.type == R_ANAL_OP_TYPE_CALL) {
/* NOTE(review): the JAYRO_03 branch is dead unless that macro is
 * defined (and would then hit the #error). */
#if JAYRO_03
#error FUCK
				if (!anal_is_bad_call (core, from, to, addr, buf, bufi)) {
					fcn = r_anal_get_fcn_in (core->anal, op.jump, R_ANAL_FCN_TYPE_ROOT);
					if (!fcn) {
						r_core_anal_fcn (core, op.jump, addr, R_ANAL_REF_TYPE_NULL, depth);
					}
				}
#else
				// add xref here
				r_anal_xrefs_set (core->anal, R_ANAL_REF_TYPE_CALL, addr, op.jump);
				if (r_io_is_valid_offset (core->io, op.jump, 1)) {
					r_core_anal_fcn (core, op.jump, addr, R_ANAL_REF_TYPE_NULL, depth);
				}
#endif
			}
		} else {
			op.size = minop;
		}
		if ((int)op.size < 1) {
			op.size = minop;
		}
		addr += op.size;
		bufi += addrbytes * op.size;
		r_anal_op_fini (&op);
	}
	free (buf);
	free (block);
}

/* "aac" — analyze function calls in the given range, or in the
 * executable boundaries when no length is given.  With only_print_flag
 * set, prints flag commands instead of performing the analysis. */
static void cmd_anal_calls(RCore *core, const char *input, bool only_print_flag) {
	RList *ranges = NULL;
	RIOMap *r;
	RBinFile *binfile;
	ut64 addr;
	ut64 len = r_num_math (core->num, input);
	if (len > 0xffffff) {
		eprintf ("Too big\n");
		return;
	}
	binfile = r_core_bin_cur (core);
	addr = core->offset;
	if (binfile) {
		if (len) {
			RIOMap *m = R_NEW0 (RIOMap);
			m->itv.addr = addr;
			m->itv.size = len;
			/* NOTE(review): `ranges` is still NULL at this point, so
			 * this append cannot store `m` (the map is lost/leaked) —
			 * the list should be allocated before appending. Confirm
			 * and fix upstream. */
			r_list_append (ranges, m);
		} else {
			ranges = r_core_get_boundaries_prot (core, R_IO_EXEC, NULL, "anal");
		}
	}
	r_cons_break_push (NULL, NULL);
	if (!binfile || !r_list_length (ranges)) {
		/* no usable ranges: fall back to the generic boundaries */
		RListIter *iter;
		RIOMap *map;
		r_list_free (ranges);
		ranges = r_core_get_boundaries_prot (core, 0, NULL, "anal");
		r_list_foreach (ranges, iter, map) {
			ut64 addr = map->itv.addr;
			if (only_print_flag) {
				/* NOTE(review): %d is given map->itv.size — presumably a
				 * 64-bit value; verify the format width matches. */
				r_cons_printf ("f fcn.0x%08"PFMT64x" %d 0x%08"PFMT64x"\n",
					addr, map->itv.size, addr);
			} else {
				_anal_calls (core, addr, r_itv_end (map->itv));
			}
		}
	} else {
		RListIter *iter;
		if (binfile) {
			r_list_foreach (ranges, iter, r) {
				addr = r->itv.addr;
				//this normally will happen on fuzzed binaries, dunno if with huge
				//binaries as well
				if (r_cons_is_breaked ()) {
					break;
				}
				if (only_print_flag) {
					r_cons_printf ("f fcn.0x%08"PFMT64x" %d 0x%08"PFMT64x"\n",
						addr, r->itv.size, addr);
				} else {
					_anal_calls (core, addr, r_itv_end (r->itv));
				}
			}
		}
	}
	r_cons_break_pop ();
	r_list_free (ranges);
}

/* "asf" — query the function-signature sdb; with an argument runs that
 * query, otherwise dumps everything ("*"). */
static void cmd_asf(RCore *core, const char *input) {
	char *ret;
	if (input[0] == ' ') {
		ret = sdb_querys (core->anal->sdb_fcnsign, NULL, 0, input + 1);
	} else {
		ret = sdb_querys (core->anal->sdb_fcnsign, NULL, 0, "*");
	}
	if (ret && *ret) {
		r_cons_println (ret);
	}
	free (ret);
}

/* "as" — syscall helpers: resolve names/numbers, list syscalls, emit
 * .equ/#define constants, query the syscall sdb. */
static void cmd_anal_syscall(RCore *core, const char *input) {
	RSyscallItem *si;
	RListIter *iter;
	RList *list;
	/* NOTE(review): `num` stays NULL and is passed to r_num_math below —
	 * presumably r_num_math tolerates a NULL RNum, but core->num looks
	 * like the intended handle; confirm. */
	RNum *num = NULL;
	char *out;
	int n;
	switch (input[0]) {
	case 'c': // "asc"
		if (input[1] == 'a') {
			if (input[2] == ' ') {
				/* numeric argument: resolve number -> name */
				if (!isalpha (input[3]) && (n = r_num_math (num, input + 3)) >= 0 ) {
					si = r_syscall_get (core->anal->syscall, n, -1);
					if (si) r_cons_printf (".equ SYS_%s %d\n", si->name, n);
					else eprintf ("Unknown syscall number\n");
				} else {
					/* name argument: resolve name -> number */
					n = r_syscall_get_num (core->anal->syscall, input + 3);
					if (n != -1) {
						r_cons_printf (".equ SYS_%s %d\n", input + 3, n);
					} else {
						eprintf ("Unknown syscall name\n");
					}
				}
			} else {
				list = r_syscall_list (core->anal->syscall);
				r_list_foreach (list, iter, si) {
					r_cons_printf (".equ SYS_%s %d\n", si->name, (ut32)si->num);
				}
				r_list_free (list);
			}
		} else {
			if (input[1] == ' ') {
				if (!isalpha (input[2]) && (n = r_num_math (num, input + 2)) >= 0 ) {
					si = r_syscall_get (core->anal->syscall, n, -1);
					if (si) r_cons_printf ("#define SYS_%s %d\n", si->name, n);
					else eprintf ("Unknown syscall number\n");
				} else {
					n = r_syscall_get_num (core->anal->syscall, input + 2);
					if (n != -1) {
						r_cons_printf ("#define SYS_%s %d\n", input + 2, n);
					} else {
						eprintf ("Unknown syscall name\n");
					}
				}
			} else {
				list
= r_syscall_list (core->anal->syscall); r_list_foreach (list, iter, si) { r_cons_printf ("#define SYS_%s %d\n", si->name, (ut32)si->num); } r_list_free (list); } } break; case 'f': // "asf" cmd_asf (core, input + 1); break; case 'l': // "asl" if (input[1] == ' ') { if (!isalpha (input[2]) && (n = r_num_math (num, input + 2)) >= 0 ) { si = r_syscall_get (core->anal->syscall, n, -1); if (si) r_cons_println (si->name); else eprintf ("Unknown syscall number\n"); } else { n = r_syscall_get_num (core->anal->syscall, input + 2); if (n != -1) { r_cons_printf ("%d\n", n); } else { eprintf ("Unknown syscall name\n"); } } } else { list = r_syscall_list (core->anal->syscall); r_list_foreach (list, iter, si) { r_cons_printf ("%s = 0x%02x.%u\n", si->name, (ut32)si->swi, (ut32)si->num); } r_list_free (list); } break; case 'j': // "asj" list = r_syscall_list (core->anal->syscall); r_cons_printf ("["); r_list_foreach (list, iter, si) { r_cons_printf ("{\"name\":\"%s\"," "\"swi\":\"%d\",\"num\":\"%d\"}", si->name, si->swi, si->num); if (iter->n) { r_cons_printf (","); } } r_cons_printf ("]\n"); r_list_free (list); // JSON support break; case '\0': cmd_syscall_do (core, -1); //n); break; case ' ': cmd_syscall_do (core, (int)r_num_get (core->num, input + 1)); break; case 'k': // "ask" if (input[1] == ' ') { out = sdb_querys (core->anal->syscall->db, NULL, 0, input + 2); if (out) { r_cons_println (out); free (out); } } else eprintf ("|ERROR| Usage: ask [query]\n"); break; default: case '?': r_core_cmd_help (core, help_msg_as); break; } } static void anal_axg (RCore *core, const char *input, int level, Sdb *db, int opts) { char arg[32], pre[128]; RList *xrefs; RListIter *iter; RAnalRef *ref; ut64 addr = core->offset; int is_json = opts & R_CORE_ANAL_JSON; if (input && *input) { addr = r_num_math (core->num, input); } int spaces = (level + 1) * 2; if (spaces > sizeof (pre) - 4) { spaces = sizeof (pre) - 4; } memset (pre, ' ', sizeof (pre)); strcpy (pre+spaces, "- "); xrefs = 
r_anal_xrefs_get (core->anal, addr); if (!r_list_empty (xrefs)) { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, -1); if (fcn) { if (is_json) { r_cons_printf ("{\"%"PFMT64d"\":{\"type\":\"fcn\"," "\"fcn_addr\":%"PFMT64d",\"name\":\"%s\",\"refs\":[", addr, fcn->addr, fcn->name); } else { //if (sdb_add (db, fcn->name, "1", 0)) { r_cons_printf ("%s0x%08"PFMT64x" fcn 0x%08"PFMT64x" %s\n", pre + 2, addr, fcn->addr, fcn->name); //} } } else { if (is_json) { r_cons_printf ("{\"%"PFMT64d"\":{\"refs\":[", addr); } else { //snprintf (arg, sizeof (arg), "0x%08"PFMT64x, addr); //if (sdb_add (db, arg, "1", 0)) { r_cons_printf ("%s0x%08"PFMT64x"\n", pre+2, addr); //} } } } r_list_foreach (xrefs, iter, ref) { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, ref->addr, -1); if (fcn) { if (is_json) { if (level == 0) { r_cons_printf ("{\"%"PFMT64d"\":{\"type\":\"fcn\",\"fcn_addr\": %"PFMT64d",\"name\":\"%s\",\"refs\":[", ref->addr, fcn->addr, fcn->name); } else { r_cons_printf ("]}},{\"%"PFMT64d"\":{\"type\":\"fcn\",\"fcn_addr\": %"PFMT64d",\"name\":\"%s\",\"refs\":[", ref->addr, fcn->addr, fcn->name); } } else { r_cons_printf ("%s0x%08"PFMT64x" fcn 0x%08"PFMT64x" %s\n", pre, ref->addr, fcn->addr, fcn->name); } if (sdb_add (db, fcn->name, "1", 0)) { snprintf (arg, sizeof (arg), "0x%08"PFMT64x, fcn->addr); anal_axg (core, arg, level+1, db, opts); } else { if (is_json) { r_cons_printf("]}}"); } } if (is_json) { if (iter->n) { r_cons_printf (","); } } } else { if (is_json) { r_cons_printf ("{\"%"PFMT64d"\":{\"type\":\"???\",\"refs\":[", ref->addr); } else { r_cons_printf ("%s0x%08"PFMT64x" ???\n", pre, ref->addr); } snprintf (arg, sizeof (arg), "0x%08"PFMT64x, ref->addr); if (sdb_add (db, arg, "1", 0)) { anal_axg (core, arg, level +1, db, opts); } else { if (is_json) { r_cons_printf("]}}"); } } if (is_json) { if (iter->n) { r_cons_printf (","); } } } } if (is_json) { r_cons_printf("]}}"); if (level == 0) { r_cons_printf("\n"); } } r_list_free (xrefs); } static void 
cmd_anal_ucall_ref (RCore *core, ut64 addr) { RAnalFunction * fcn = r_anal_get_fcn_at (core->anal, addr, R_ANAL_FCN_TYPE_NULL); if (fcn) { r_cons_printf (" ; %s", fcn->name); } else { r_cons_printf (" ; 0x%" PFMT64x, addr); } } static bool cmd_anal_refs(RCore *core, const char *input) { ut64 addr = core->offset; switch (input[0]) { case '-': { // "ax-" RList *list; RListIter *iter; RAnalRef *ref; char *cp_inp = strdup (input + 1); char *ptr = r_str_trim_head (cp_inp); if (!strcmp (ptr, "*")) { r_anal_xrefs_init (core->anal); } else { int n = r_str_word_set0 (ptr); ut64 from = UT64_MAX, to = UT64_MAX; switch (n) { case 2: from = r_num_math (core->num, r_str_word_get0 (ptr, 1)); //fall through case 1: // get addr to = r_num_math (core->num, r_str_word_get0 (ptr, 0)); break; default: to = core->offset; break; } list = r_anal_xrefs_get (core->anal, to); if (list) { r_list_foreach (list, iter, ref) { if (from != UT64_MAX && from == ref->addr) { r_anal_ref_del (core->anal, ref->addr, ref->at); } if (from == UT64_MAX) { r_anal_ref_del (core->anal, ref->addr, ref->at); } } r_list_free (list); } } free (cp_inp); } break; case 'g': // "axg" { Sdb *db = sdb_new0 (); if(input[1] == '\0') { anal_axg (core, input[1] ? input + 2 : NULL, 0, db, 0); } else if(input[1] == 'j') { anal_axg (core, input[1] ? 
input + 2 : NULL, 0, db, R_CORE_ANAL_JSON); } sdb_free (db); } break; case 'k': // "axk" if (input[1] == '?') { eprintf ("Usage: axk [query]\n"); } else if (input[1] == ' ') { sdb_query (core->anal->sdb_xrefs, input + 2); } else { r_core_anal_ref_list (core, 'k'); } break; case '\0': // "ax" case 'j': // "axj" case 'q': // "axq" case '*': // "ax*" r_core_anal_ref_list (core, input[0]); break; case 't': { // "axt" const int size = 12; RList *list; RAnalFunction *fcn; RAnalRef *ref; RListIter *iter; ut8 buf[12]; RAsmOp asmop; char *buf_asm = NULL; char *space = strchr (input, ' '); if (space) { addr = r_num_math (core->num, space + 1); } else { addr = core->offset; } list = r_anal_xrefs_get (core->anal, addr); if (list) { if (input[1] == 'q') { // "axtq" r_list_foreach (list, iter, ref) { r_cons_printf ("0x%" PFMT64x "\n", ref->addr); } } else if (input[1] == 'j') { // "axtj" bool asm_varsub = r_config_get_i (core->config, "asm.varsub"); core->parser->relsub = r_config_get_i (core->config, "asm.relsub"); core->parser->localvar_only = r_config_get_i (core->config, "asm.varsub_only"); r_cons_printf ("["); r_list_foreach (list, iter, ref) { r_core_read_at (core, ref->addr, buf, size); r_asm_set_pc (core->assembler, ref->addr); r_asm_disassemble (core->assembler, &asmop, buf, size); char str[512]; fcn = r_anal_get_fcn_in (core->anal, ref->addr, 0); if (asm_varsub) { r_parse_varsub (core->parser, fcn, ref->addr, asmop.size, asmop.buf_asm, asmop.buf_asm, sizeof (asmop.buf_asm)); } r_parse_filter (core->parser, core->flags, asmop.buf_asm, str, sizeof (str), core->print->big_endian); r_cons_printf ("{\"from\":%" PFMT64u ",\"type\":\"%s\",\"opcode\":\"%s\"", ref->addr, r_anal_ref_to_string (ref->type), str); if (fcn) { r_cons_printf (",\"fcn_addr\":%"PFMT64d",\"fcn_name\":\"%s\"", fcn->addr, fcn->name); } RFlagItem *fi = r_flag_get_at (core->flags, fcn? 
fcn->addr: ref->addr, true); if (fi) { if (fcn && strcmp (fcn->name, fi->name)) { r_cons_printf (",\"flag\":\"%s\"", fi->name); } if (fi->realname && strcmp (fi->name, fi->realname)) { r_cons_printf (",\"realname\":\"%s\"", fi->realname); } } r_cons_printf ("}%s", iter->n? ",": ""); } r_cons_printf ("]"); r_cons_newline (); } else if (input[1] == 'g') { // axtg r_list_foreach (list, iter, ref) { char *str = r_core_cmd_strf (core, "fd 0x%"PFMT64x, ref->addr); if (!str) { str = strdup ("?\n"); } r_str_trim_tail (str); r_cons_printf ("agn 0x%" PFMT64x " \"%s\"\n", ref->addr, str); free (str); } if (input[2] != '*') { RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, addr, 0); r_cons_printf ("agn 0x%" PFMT64x " \"%s\"\n", addr, fcn?fcn->name: "$$"); } r_list_foreach (list, iter, ref) { r_cons_printf ("age 0x%" PFMT64x " 0x%"PFMT64x"\n", ref->addr, addr); } } else if (input[1] == '*') { // axt* // TODO: implement multi-line comments r_list_foreach (list, iter, ref) r_cons_printf ("CCa 0x%" PFMT64x " \"XREF type %d at 0x%" PFMT64x"%s\n", ref->addr, ref->type, addr, iter->n? 
",": ""); } else { // axt int has_color = core->print->flags & R_PRINT_FLAGS_COLOR; char str[512]; RAnalFunction *fcn; char *comment; bool asm_varsub = r_config_get_i (core->config, "asm.varsub"); core->parser->relsub = r_config_get_i (core->config, "asm.relsub"); core->parser->localvar_only = r_config_get_i (core->config, "asm.varsub_only"); if (core->parser->relsub) { core->parser->relsub_addr = addr; } r_list_foreach (list, iter, ref) { r_core_read_at (core, ref->addr, buf, size); r_asm_set_pc (core->assembler, ref->addr); r_asm_disassemble (core->assembler, &asmop, buf, size); fcn = r_anal_get_fcn_in (core->anal, ref->addr, 0); if (asm_varsub) { r_parse_varsub (core->parser, fcn, ref->addr, asmop.size, asmop.buf_asm, asmop.buf_asm, sizeof (asmop.buf_asm)); } r_parse_filter (core->parser, core->flags, asmop.buf_asm, str, sizeof (str), core->print->big_endian); if (has_color) { buf_asm = r_print_colorize_opcode (core->print, str, core->cons->pal.reg, core->cons->pal.num, false); } else { buf_asm = r_str_new (str); } comment = r_meta_get_string (core->anal, R_META_TYPE_COMMENT, ref->addr); char *buf_fcn = comment ? r_str_newf ("%s; %s", fcn ? fcn->name : "(nofunc)", strtok (comment, "\n")) : r_str_newf ("%s", fcn ? fcn->name : "(nofunc)"); r_cons_printf ("%s 0x%" PFMT64x " [%s] %s\n", buf_fcn, ref->addr, r_anal_ref_to_string (ref->type), buf_asm); free (buf_asm); free (buf_fcn); } } r_list_free (list); } else { if (input[1] == 'j') { // "axtj" r_cons_print ("[]\n"); } } } break; case 'f': { // "axf" ut8 buf[12]; RAsmOp asmop; char *buf_asm = NULL; RList *list, *list_ = NULL; RAnalRef *ref; RListIter *iter; char *space = strchr (input, ' '); if (space) { addr = r_num_math (core->num, space + 1); } else { addr = core->offset; } if (input[1] == '.') { // axf. 
list = list_ = r_anal_xrefs_get_from (core->anal, addr); if (!list) { RAnalFunction * fcn = r_anal_get_fcn_in (core->anal, addr, 0); list = r_anal_fcn_get_refs (core->anal, fcn); } } else { list = r_anal_refs_get (core->anal, addr); } if (list) { if (input[1] == 'q') { // axfq r_list_foreach (list, iter, ref) { r_cons_printf ("0x%" PFMT64x "\n", ref->at); } } else if (input[1] == 'j') { // axfj r_cons_print ("["); r_list_foreach (list, iter, ref) { r_core_read_at (core, ref->at, buf, 12); r_asm_set_pc (core->assembler, ref->at); r_asm_disassemble (core->assembler, &asmop, buf, 12); r_cons_printf ("{\"from\":%" PFMT64d ",\"to\":%" PFMT64d ",\"type\":\"%s\",\"opcode\":\"%s\"}%s", ref->at, ref->addr, r_anal_ref_to_string (ref->type), asmop.buf_asm, iter->n? ",": ""); } r_cons_print ("]\n"); } else if (input[1] == '*') { // axf* // TODO: implement multi-line comments r_list_foreach (list, iter, ref) { r_cons_printf ("CCa 0x%" PFMT64x " \"XREF from 0x%" PFMT64x "\n", ref->at, ref->type, asmop.buf_asm, iter->n? 
",": ""); } } else { // axf char str[512]; int has_color = core->print->flags & R_PRINT_FLAGS_COLOR; r_list_foreach (list, iter, ref) { r_core_read_at (core, ref->at, buf, 12); r_asm_set_pc (core->assembler, ref->at); r_asm_disassemble (core->assembler, &asmop, buf, 12); r_parse_filter (core->parser, core->flags, asmop.buf_asm, str, sizeof (str), core->print->big_endian); if (has_color) { buf_asm = r_print_colorize_opcode (core->print, str, core->cons->pal.reg, core->cons->pal.num, false); } else { buf_asm = r_str_new (str); } r_cons_printf ("%c 0x%" PFMT64x " %s", ref->type, ref->at, buf_asm); if (ref->type == R_ANAL_REF_TYPE_CALL) { RAnalOp aop; r_anal_op (core->anal, &aop, ref->at, buf, 12, R_ANAL_OP_MASK_ALL); if (aop.type == R_ANAL_OP_TYPE_UCALL) { cmd_anal_ucall_ref (core, ref->addr); } } r_cons_newline (); free (buf_asm); } } r_list_free (list_); r_list_free (list); } else { if (input[1] == 'j') { // axfj r_cons_print ("[]\n"); } } } break; case 'F': find_refs (core, input + 1); break; case 'C': // "axC" case 'c': // "axc" case 'd': // "axd" case ' ': // "ax " { char *ptr = strdup (r_str_trim_head ((char *)input + 1)); int n = r_str_word_set0 (ptr); ut64 at = core->offset; ut64 addr = UT64_MAX; switch (n) { case 2: // get at at = r_num_math (core->num, r_str_word_get0 (ptr, 1)); /* fall through */ case 1: // get addr addr = r_num_math (core->num, r_str_word_get0 (ptr, 0)); break; default: free (ptr); return false; } r_anal_xrefs_set (core->anal, input[0], at, addr); free (ptr); } break; default: case '?': r_core_cmd_help (core, help_msg_ax); break; } return true; } static void cmd_anal_hint(RCore *core, const char *input) { switch (input[0]) { case '?': if (input[1]) { ut64 addr = r_num_math (core->num, input + 1); r_core_anal_hint_print (core->anal, addr, 0); } else { r_core_cmd_help (core, help_msg_ah); } break; case '.': // "ah." 
r_core_anal_hint_print (core->anal, core->offset, 0); break; case 'a': // "aha" set arch if (input[1]) { int i; char *ptr = strdup (input + 2); i = r_str_word_set0 (ptr); if (i == 2) { r_num_math (core->num, r_str_word_get0 (ptr, 1)); } r_anal_hint_set_arch (core->anal, core->offset, r_str_word_get0 (ptr, 0)); free (ptr); } else if (input[1] == '-') { r_anal_hint_unset_arch (core->anal, core->offset); } else { eprintf ("Missing argument\n"); } break; case 'b': // "ahb" set bits if (input[1]) { char *ptr = strdup (input + 2); int bits; int i = r_str_word_set0 (ptr); if (i == 2) { r_num_math (core->num, r_str_word_get0 (ptr, 1)); } bits = r_num_math (core->num, r_str_word_get0 (ptr, 0)); r_anal_hint_set_bits (core->anal, core->offset, bits); free (ptr); } else if (input[1] == '-') { r_anal_hint_unset_bits (core->anal, core->offset); } else { eprintf ("Missing argument\n"); } break; case 'i': // "ahi" if (input[1] == '?') { r_core_cmd_help (core, help_msg_ahi); } else if (input[1] == ' ') { // You can either specify immbase with letters, or numbers const int base = (input[2] == 's') ? 1 : (input[2] == 'b') ? 2 : (input[2] == 'p') ? 3 : (input[2] == 'o') ? 8 : (input[2] == 'd') ? 10 : (input[2] == 'h') ? 16 : (input[2] == 'i') ? 32 : // ip address (input[2] == 'S') ? 
80 : // syscall (int) r_num_math (core->num, input + 1); r_anal_hint_set_immbase (core->anal, core->offset, base); } else if (input[1] == '-') { // "ahi-" r_anal_hint_set_immbase (core->anal, core->offset, 0); } else { eprintf ("|ERROR| Usage: ahi [base]\n"); } break; case 'h': // "ahh" if (input[1] == '-') { r_anal_hint_unset_high (core->anal, core->offset); } else if (input[1] == ' ') { r_anal_hint_set_high (core->anal, r_num_math (core->num, input + 1)); } else { r_anal_hint_set_high (core->anal, core->offset); } break; case 'c': // "ahc" if (input[1] == ' ') { r_anal_hint_set_jump ( core->anal, core->offset, r_num_math (core->num, input + 1)); } else if (input[1] == '-') { r_anal_hint_unset_jump (core->anal, core->offset); } break; case 'f': // "ahf" if (input[1] == ' ') { r_anal_hint_set_fail ( core->anal, core->offset, r_num_math (core->num, input + 1)); } else if (input[1] == '-') { r_anal_hint_unset_fail (core->anal, core->offset); } break; case 's': // "ahs" set size (opcode length) if (input[1] == ' ') { r_anal_hint_set_size (core->anal, core->offset, atoi (input + 1)); } else if (input[1] == '-') { r_anal_hint_unset_size (core->anal, core->offset); } else { eprintf ("Usage: ahs 16\n"); } break; case 'S': // "ahS" set size (opcode length) if (input[1] == ' ') { r_anal_hint_set_syntax (core->anal, core->offset, input + 2); } else if (input[1] == '-') { r_anal_hint_unset_syntax (core->anal, core->offset); } else { eprintf ("Usage: ahS att\n"); } break; case 'o': // "aho" set opcode string if (input[1] == ' ') { r_anal_hint_set_opcode (core->anal, core->offset, input + 2); } else if (input[1] == '-') { r_anal_hint_unset_opcode (core->anal, core->offset); } else { eprintf ("Usage: aho popall\n"); } break; case 'e': // "ahe" set ESIL string if (input[1] == ' ') { r_anal_hint_set_esil (core->anal, core->offset, input + 2); } else if (input[1] == '-') { r_anal_hint_unset_esil (core->anal, core->offset); } else { eprintf ("Usage: ahe r0,pc,=\n"); } break; #if 0 
/* (still inside the "#if 0" opened on the previous line — disabled
 * duplicate of the opcode-hint handler) */
	case 'e': // set endian
		if (input[1] == ' ') {
			r_anal_hint_set_opcode (core->anal, core->offset, atoi (input + 1));
		} else if (input[1] == '-') {
			r_anal_hint_unset_opcode (core->anal, core->offset);
		}
		break;
#endif
	case 'p': // "ahp"
		if (input[1] == ' ') {
			r_anal_hint_set_pointer (core->anal, core->offset, r_num_math (core->num, input + 1));
		} else if (input[1] == '-') { // "ahp-"
			r_anal_hint_unset_pointer (core->anal, core->offset);
		}
		break;
	case '*': // "ah*"
		if (input[1] == ' ') {
			char *ptr = strdup (r_str_trim_ro (input + 2));
			r_str_word_set0 (ptr);
			ut64 addr = r_num_math (core->num, r_str_word_get0 (ptr, 0));
			r_core_anal_hint_print (core->anal, addr, '*');
		} else {
			r_core_anal_hint_list (core->anal, input[0]);
		}
		break;
	case 'j': // "ahj"
	case '\0': // "ah"
		r_core_anal_hint_list (core->anal, input[0]);
		break;
	case '-': // "ah-"  delete hint(s): "ah-*" clears all, "ah- addr [size]" deletes a range
		if (input[1]) {
			if (input[1] == '*') {
				r_anal_hint_clear (core->anal);
			} else {
				char *ptr = strdup (r_str_trim_ro (input + 1));
				ut64 addr;
				int size = 1;
				int i = r_str_word_set0 (ptr);
				if (i == 2) {
					size = r_num_math (core->num, r_str_word_get0 (ptr, 1));
				}
				const char *a0 = r_str_word_get0 (ptr, 0);
				if (a0 && *a0) {
					addr = r_num_math (core->num, a0);
				} else {
					addr = core->offset;
				}
				r_anal_hint_del (core->anal, addr, size);
				free (ptr);
			}
		} else {
			r_anal_hint_clear (core->anal);
		}
		break;
	}
}

/* Graphviz-dot printer for one RANode: node id, URL and label. */
static void agraph_print_node_dot(RANode *n, void *user) {
	char *label = strdup (n->body);
	//label = r_str_replace (label, "\n", "\\l", 1);
	if (!label || !*label) {
		r_cons_printf ("\"%s\" [URL=\"%s\", color=\"lightgray\", label=\"%s\"]\n",
			n->title, n->title, n->title);
	} else {
		r_cons_printf ("\"%s\" [URL=\"%s\", color=\"lightgray\", label=\"%s\\n%s\"]\n",
			n->title, n->title, n->title, label);
	}
	free (label);
}

/* r2-command printer for one RANode: emits an "agn <title> base64:<body>"
 * line that can re-create the node. */
static void agraph_print_node(RANode *n, void *user) {
	char *encbody, *cmd;
	int len = strlen (n->body);
	/* NOTE(review): when n->body is the empty string this reads
	 * n->body[-1] — out-of-bounds; should be guarded with len > 0. */
	if (n->body[len - 1] == '\n') {
		len--;
	}
	encbody = r_base64_encode_dyn (n->body, len);
	cmd = r_str_newf ("agn \"%s\" base64:%s\n", n->title, encbody);
	/* NOTE(review): cmd is passed as the printf format string — a '%' in
	 * the node title would be interpreted as a conversion; safer as
	 * r_cons_printf ("%s", cmd) or r_cons_print (cmd). */
	r_cons_printf (cmd);
	free (cmd);
	free (encbody);
}

/* Graphviz-dot printer for one edge. */
static void agraph_print_edge_dot(RANode *from, RANode *to, void *user) {
	r_cons_printf ("\"%s\" -> \"%s\"\n", from->title, to->title);
}

/* r2-command printer for one edge ("age" line). */
static void agraph_print_edge(RANode *from, RANode *to, void *user) {
	r_cons_printf ("age \"%s\" \"%s\"\n", from->title, to->title);
}

/* "agn" / "agn-" — add (optionally base64-encoded body) or delete a
 * node in the user graph. */
static void cmd_agraph_node(RCore *core, const char *input) {
	switch (*input) {
	case ' ': { // "agn"
		char *newbody = NULL;
		char **args, *body;
		int n_args, B_LEN = strlen ("base64:");
		input++;
		args = r_str_argv (input, &n_args);
		if (n_args < 1 || n_args > 2) {
			r_cons_printf ("Wrong arguments\n");
			r_str_argv_free (args);
			break;
		}
		// strdup cause there is double free in r_str_argv_free due to a realloc call
		if (n_args > 1) {
			body = strdup (args[1]);
			if (strncmp (body, "base64:", B_LEN) == 0) {
				body = r_str_replace (body, "\\n", "", true);
				newbody = (char *)r_base64_decode_dyn (body + B_LEN, -1);
				free (body);
				if (!newbody) {
					eprintf ("Cannot allocate buffer\n");
					r_str_argv_free (args);
					break;
				}
				body = newbody;
			}
			body = r_str_append (body, "\n");
		} else {
			body = strdup ("");
		}
		/* NOTE(review): body is freed right after — presumably
		 * r_agraph_add_node copies it; confirm. */
		r_agraph_add_node (core->graph, args[0], body);
		r_str_argv_free (args);
		free (body);
		//free newbody it's not necessary since r_str_append reallocate the space
		break;
	}
	case '-': { // "agn-"
		char **args;
		int n_args;
		input++;
		args = r_str_argv (input, &n_args);
		if (n_args != 1) {
			r_cons_printf ("Wrong arguments\n");
			r_str_argv_free (args);
			break;
		}
		r_agraph_del_node (core->graph, args[0]);
		r_str_argv_free (args);
		break;
	}
	case '?':
	default:
		r_core_cmd_help (core, help_msg_agn);
		break;
	}
}

/* "age" / "age-" — add or delete an edge between two named graph nodes. */
static void cmd_agraph_edge(RCore *core, const char *input) {
	switch (*input) {
	case ' ': // "age"
	case '-': { // "age-"
		RANode *u, *v;
		char **args;
		int n_args;
		args = r_str_argv (input + 1, &n_args);
		if (n_args != 2) {
			r_cons_printf ("Wrong arguments\n");
			r_str_argv_free (args);
			break;
		}
		u = r_agraph_get_node (core->graph, args[0]);
		v = r_agraph_get_node (core->graph, args[1]);
		if (!u
|| !v) { if (!u) { r_cons_printf ("Node %s not found!\n", args[0]); } else { r_cons_printf ("Node %s not found!\n", args[1]); } r_str_argv_free (args); break; } if (*input == ' ') { r_agraph_add_edge (core->graph, u, v); } else { r_agraph_del_edge (core->graph, u, v); } r_str_argv_free (args); break; } case '?': default: r_core_cmd_help (core, help_msg_age); break; } } static void cmd_agraph_print(RCore *core, const char *input) { switch (*input) { case 'k': // "aggk" { Sdb *db = r_agraph_get_sdb (core->graph); char *o = sdb_querys (db, "null", 0, "*"); r_cons_print (o); free (o); break; } case 'v': // "aggv" { const char *cmd = r_config_get (core->config, "cmd.graph"); if (cmd && *cmd) { char *newCmd = strdup (cmd); if (newCmd) { newCmd = r_str_replace (newCmd, "ag $$", "aggd", 0); r_core_cmd0 (core, newCmd); free (newCmd); } } else { r_core_cmd0 (core, "agf"); } break; } case 'i': // "aggi" - open current core->graph in interactive mode { RANode *ran = r_agraph_get_first_node (core->graph); if (ran) { r_agraph_set_title (core->graph, r_config_get (core->config, "graph.title")); r_agraph_set_curnode (core->graph, ran); core->graph->force_update_seek = true; core->graph->need_set_layout = true; core->graph->layout = r_config_get_i (core->config, "graph.layout"); int ov = r_config_get_i (core->config, "scr.interactive"); core->graph->need_update_dim = true; r_core_visual_graph (core, core->graph, NULL, true); r_config_set_i (core->config, "scr.interactive", ov); r_cons_show_cursor (true); } else { eprintf ("This graph contains no nodes\n"); } break; } case 'd': // "aggd" - dot format r_cons_printf ("digraph code {\ngraph [bgcolor=white];\n" "node [color=lightgray, style=filled shape=box " "fontname=\"Courier\" fontsize=\"8\"];\n"); r_agraph_foreach (core->graph, agraph_print_node_dot, NULL); r_agraph_foreach_edge (core->graph, agraph_print_edge_dot, NULL); r_cons_printf ("}\n"); break; case '*': // "agg*" - r_agraph_foreach (core->graph, agraph_print_node, NULL); 
r_agraph_foreach_edge (core->graph, agraph_print_edge, NULL); break; case '?': r_core_cmd_help (core, help_msg_agg); break; default: core->graph->can->linemode = r_config_get_i (core->config, "graph.linemode"); core->graph->can->color = r_config_get_i (core->config, "scr.color"); r_agraph_set_title (core->graph, r_config_get (core->config, "graph.title")); r_agraph_print (core->graph); break; } } static void cmd_anal_graph(RCore *core, const char *input) { RList *list; const char *arg; switch (input[0]) { case 'f': // "agf" switch (input[1]) { case 't':// "agft" - tiny graph r_core_visual_graph (core, NULL, NULL, 2); break; case 0: r_core_visual_graph (core, NULL, NULL, false); break; default: eprintf ("Usage: agf or agft (for tiny)\n"); break; } break; case '-': // "ag-" r_agraph_reset (core->graph); break; case 'n': // "agn" cmd_agraph_node (core, input + 1); break; case 'e': // "age" cmd_agraph_edge (core, input + 1); break; case 'g': // "agg" cmd_agraph_print (core, input + 1); break; case 's': // "ags" r_core_anal_graph (core, r_num_math (core->num, input + 1), 0); break; case 't': // "agt" list = r_core_anal_graph_to (core, r_num_math (core->num, input + 1), 0); if (list) { RListIter *iter, *iter2; RList *list2; RAnalBlock *bb; r_list_foreach (list, iter, list2) { r_list_foreach (list2, iter2, bb) { r_cons_printf ("-> 0x%08" PFMT64x "\n", bb->addr); } } r_list_purge (list); free (list); } break; case 'C': // "agC" r_core_anal_coderefs (core, UT64_MAX, input[1] == 'j'? 2: 1); break; case 'r': // "refs" switch (input[1]) { case '*': case 'j': case ' ': case 0: { ut64 addr = input[2]? r_num_math (core->num, input + 2): core->offset; r_core_anal_codexrefs (core, addr, '*'); } break; default: eprintf ("|ERROR| Usage: agr[*j]\n"); break; } break; case 'c': // "agc" if (input[1] == '*') { ut64 addr = input[2]? r_num_math (core->num, input + 2): UT64_MAX; r_core_anal_coderefs (core, addr, '*'); } else if (input[1] == 'j') { ut64 addr = input[2]? 
r_num_math (core->num, input + 2): UT64_MAX; r_core_anal_coderefs (core, addr, 2); } else if (input[1] == ' ') { ut64 addr = input[2]? r_num_math (core->num, input + 1): UT64_MAX; r_core_anal_coderefs (core, addr, 1); } else { eprintf ("|ERROR| Usage: agc[j*] ([addr])\n"); } break; case 'j': // "agj" r_core_anal_graph (core, r_num_math (core->num, input + 1), R_CORE_ANAL_JSON); break; case 'J': // "agJ" r_core_anal_graph (core, r_num_math (core->num, input + 1), R_CORE_ANAL_JSON | R_CORE_ANAL_JSON_FORMAT_DISASM); break; case 'k': // "agk" r_core_anal_graph (core, r_num_math (core->num, input + 1), R_CORE_ANAL_KEYVALUE); break; case 'l': // "agl" r_core_anal_graph (core, r_num_math (core->num, input + 1), R_CORE_ANAL_GRAPHLINES); break; case 'a': // "aga" r_core_anal_graph (core, r_num_math (core->num, input + 1), 0); break; case 'd': // "agd" r_core_anal_graph (core, r_num_math (core->num, input + 1), R_CORE_ANAL_GRAPHBODY | R_CORE_ANAL_GRAPHDIFF); break; case 'v': // "agv" if (r_config_get_i (core->config, "graph.web")) { r_core_cmd0 (core, "=H /graph/"); } else { const char *cmd = r_config_get (core->config, "cmd.graph"); if (cmd && *cmd) { r_core_cmd0 (core, cmd); } else { r_core_cmd0 (core, "agf"); } } break; case '?': // "ag?" r_core_cmd_help (core, help_msg_ag); break; case ' ': // "ag" arg = strchr (input, ' '); r_core_anal_graph (core, r_num_math (core->num, arg? 
arg + 1: NULL), R_CORE_ANAL_GRAPHBODY); break; case 0: eprintf ("|ERROR| Usage: ag [addr]\n"); break; default: eprintf ("See ag?\n"); break; } } R_API int r_core_anal_refs(RCore *core, const char *input) { int cfg_debug = r_config_get_i (core->config, "cfg.debug"); ut64 from, to; char *ptr; int rad, n; if (*input == '?') { r_core_cmd_help (core, help_msg_aar); return 0; } if (*input == 'j' || *input == '*') { rad = *input; input++; } else { rad = 0; } from = to = 0; ptr = r_str_trim_head (strdup (input)); n = r_str_word_set0 (ptr); if (!n) { // get boundaries of current memory map, section or io map if (cfg_debug) { RDebugMap *map = r_debug_map_get (core->dbg, core->offset); if (map) { from = map->addr; to = map->addr_end; } } else { RList *list = r_core_get_boundaries_prot (core, R_IO_EXEC, NULL, "anal"); RListIter *iter; RIOMap* map; r_list_foreach (list, iter, map) { from = map->itv.addr; to = r_itv_end (map->itv); if (r_cons_is_breaked ()) { break; } if (!from && !to) { eprintf ("Cannot determine xref search boundaries\n"); } else if (to - from > UT32_MAX) { eprintf ("Skipping huge range\n"); } else { r_core_anal_search_xrefs (core, from, to, rad); } } free (ptr); return 1; } } else if (n == 1) { from = core->offset; to = core->offset + r_num_math (core->num, r_str_word_get0 (ptr, 0)); } else { eprintf ("Invalid number of arguments\n"); } free (ptr); if (from == UT64_MAX && to == UT64_MAX) { return false; } if (!from && !to) { return false; } if (to - from > r_io_size (core->io)) { return false; } return r_core_anal_search_xrefs (core, from, to, rad); } static const char *oldstr = NULL; static void rowlog(RCore *core, const char *str) { int use_color = core->print->flags & R_PRINT_FLAGS_COLOR; bool verbose = r_config_get_i (core->config, "scr.prompt"); oldstr = str; if (!verbose) { return; } if (use_color) { eprintf ("[ ] "Color_YELLOW"%s\r["Color_RESET, str); } else { eprintf ("[ ] %s\r[", str); } } static void rowlog_done(RCore *core) { int use_color = 
core->print->flags & R_PRINT_FLAGS_COLOR; bool verbose = r_config_get_i (core->config, "scr.prompt"); if (verbose) { if (use_color) eprintf ("\r"Color_GREEN"[x]"Color_RESET" %s\n", oldstr); else eprintf ("\r[x] %s\n", oldstr); } } static int compute_coverage(RCore *core) { RListIter *iter; SdbListIter *iter2; RAnalFunction *fcn; RIOSection *sec; int cov = 0; r_list_foreach (core->anal->fcns, iter, fcn) { ls_foreach (core->io->sections, iter2, sec) { if (sec->flags & 1) { ut64 section_end = sec->vaddr + sec->vsize; ut64 s = r_anal_fcn_realsize (fcn); if (fcn->addr >= sec->vaddr && (fcn->addr + s) < section_end) { cov += s; } } } } return cov; } static int compute_code (RCore* core) { int code = 0; SdbListIter *iter; RIOSection *sec; ls_foreach (core->io->sections, iter, sec) { if (sec->flags & 1) { code += sec->vsize; } } return code; } static int compute_calls(RCore *core) { RListIter *iter; RAnalFunction *fcn; RList *xrefs; int cov = 0; r_list_foreach (core->anal->fcns, iter, fcn) { xrefs = r_anal_fcn_get_xrefs (core->anal, fcn); if (xrefs) { cov += r_list_length (xrefs); r_list_free (xrefs); xrefs = NULL; } } return cov; } static void r_core_anal_info (RCore *core, const char *input) { int fcns = r_list_length (core->anal->fcns); int strs = r_flag_count (core->flags, "str.*"); int syms = r_flag_count (core->flags, "sym.*"); int imps = r_flag_count (core->flags, "sym.imp.*"); int code = compute_code (core); int covr = compute_coverage (core); int call = compute_calls (core); int xrfs = r_anal_xrefs_count (core->anal); int cvpc = (code > 0)? 
(covr * 100 / code): 0; if (*input == 'j') { r_cons_printf ("{\"fcns\":%d", fcns); r_cons_printf (",\"xrefs\":%d", xrfs); r_cons_printf (",\"calls\":%d", call); r_cons_printf (",\"strings\":%d", strs); r_cons_printf (",\"symbols\":%d", syms); r_cons_printf (",\"imports\":%d", imps); r_cons_printf (",\"covrage\":%d", covr); r_cons_printf (",\"codesz\":%d", code); r_cons_printf (",\"percent\":%d}\n", cvpc); } else { r_cons_printf ("fcns %d\n", fcns); r_cons_printf ("xrefs %d\n", xrfs); r_cons_printf ("calls %d\n", call); r_cons_printf ("strings %d\n", strs); r_cons_printf ("symbols %d\n", syms); r_cons_printf ("imports %d\n", imps); r_cons_printf ("covrage %d\n", covr); r_cons_printf ("codesz %d\n", code); r_cons_printf ("percent %d%%\n", cvpc); } } static void cmd_anal_aad(RCore *core, const char *input) { RListIter *iter; RAnalRef *ref; RList *list = r_list_newf (NULL); r_anal_xrefs_from (core->anal, list, "xref", R_ANAL_REF_TYPE_DATA, UT64_MAX); r_list_foreach (list, iter, ref) { if (r_io_is_valid_offset (core->io, ref->addr, false)) { r_core_anal_fcn (core, ref->at, ref->addr, R_ANAL_REF_TYPE_NULL, 1); } } r_list_free (list); } static bool archIsArmOrThumb(RCore *core) { RAsm *as = core ? core->assembler : NULL; if (as && as->cur && as->cur->arch) { if (r_str_startswith (as->cur->arch, "mips")) { return true; } if (r_str_startswith (as->cur->arch, "arm")) { if (as->bits < 64) { return true; } } } return false; } const bool archIsMips (RCore *core) { return strstr (core->assembler->cur->name, "mips"); } void _CbInRangeAav(RCore *core, ut64 from, ut64 to, int vsize, bool asterisk, int count) { bool isarm = archIsArmOrThumb (core); if (isarm) { if (to & 1) { // .dword 0x000080b9 in reality is 0x000080b8 to--; r_anal_hint_set_bits (core->anal, to, 16); // can we assume is gonna be always a function? 
} else { r_core_seek_archbits (core, from); ut64 bits = r_config_get_i (core->config, "asm.bits"); r_anal_hint_set_bits (core->anal, from, bits); } } else { bool ismips = archIsMips (core); if (ismips) { if (from % 4 || to % 4) { eprintf ("False positive\n"); return; } } } if (asterisk) { r_cons_printf ("ax 0x%"PFMT64x " 0x%"PFMT64x "\n", to, from); r_cons_printf ("Cd %d @ 0x%"PFMT64x "\n", vsize, from); r_cons_printf ("f+ aav.0x%08"PFMT64x "= 0x%08"PFMT64x, to, to); } else { #if 1 r_anal_ref_add (core->anal, to, from, ' '); r_meta_add (core->anal, 'd', from, from + vsize, NULL); if (!r_flag_get_at (core->flags, to, false)) { char *name = r_str_newf ("aav.0x%08"PFMT64x, to); r_flag_set (core->flags, name, to, vsize); free (name); } #else r_core_cmdf (core, "ax 0x%"PFMT64x " 0x%"PFMT64x, to, from); r_core_cmdf (core, "Cd %d @ 0x%"PFMT64x, vsize, from); r_core_cmdf (core, "f+ aav.0x%08"PFMT64x "= 0x%08"PFMT64x, to, to); #endif } } static void cmd_anal_aav(RCore *core, const char *input) { #define seti(x,y) r_config_set_i(core->config, x, y); #define geti(x) r_config_get_i(core->config, x); ut64 o_align = geti ("search.align"); bool asterisk = strchr (input, '*');; bool is_debug = r_config_get_i (core->config, "cfg.debug"); // pre int archAlign = r_anal_archinfo (core->anal, R_ANAL_ARCHINFO_ALIGN); seti ("search.align", archAlign); int vsize = 4; // 32bit dword if (core->assembler->bits == 64) { vsize = 8; } // body r_cons_break_push (NULL, NULL); if (is_debug) { RList *list = r_core_get_boundaries_prot (core, 0, "dbg.map", "anal"); RListIter *iter; RIOMap *map; r_list_foreach (list, iter, map) { if (r_cons_is_breaked ()) { break; } eprintf ("aav: from 0x%"PFMT64x" to 0x%"PFMT64x"\n", map->itv.addr, r_itv_end (map->itv)); (void)r_core_search_value_in_range (core, map->itv, map->itv.addr, r_itv_end (map->itv), vsize, asterisk, _CbInRangeAav); } r_list_free (list); } else { RList *list = r_core_get_boundaries_prot (core, 0, NULL, "anal"); RListIter *iter, *iter2; RIOMap 
*map, *map2; ut64 from = UT64_MAX; ut64 to = UT64_MAX; // find values pointing to non-executable regions r_list_foreach (list, iter2, map2) { if (r_cons_is_breaked ()) { break; } //TODO: Reduce multiple hits for same addr from = r_itv_begin (map2->itv); to = r_itv_end (map2->itv); eprintf ("Value from 0x%08"PFMT64x " to 0x%08" PFMT64x "\n", from, to); r_list_foreach (list, iter, map) { ut64 begin = map->itv.addr; ut64 end = r_itv_end (map->itv); if (r_cons_is_breaked ()) { break; } if (end - begin > UT32_MAX) { eprintf ("Skipping huge range\n"); continue; } eprintf ("aav: 0x%08"PFMT64x"-0x%08"PFMT64x" in 0x%"PFMT64x"-0x%"PFMT64x"\n", from, to, begin, end); (void)r_core_search_value_in_range (core, map->itv, from, to, vsize, asterisk, _CbInRangeAav); } } r_list_free (list); } r_cons_break_pop (); // end seti ("search.align", o_align); } static bool should_aav(RCore *core) { // Don't aav on x86 for now if (r_str_startswith (r_config_get (core->config, "asm.arch"), "x86")) { return false; } return true; } static int cmd_anal_all(RCore *core, const char *input) { switch (*input) { case '?': r_core_cmd_help (core, help_msg_aa); break; case 'b': // "aab" cmd_anal_blocks (core, input + 1); break; // "aab" case 'f': // "aaf" { int analHasnext = r_config_get_i (core->config, "anal.hasnext"); r_config_set_i (core->config, "anal.hasnext", true); r_core_cmd0 (core, "afr@@c:isq"); r_config_set_i (core->config, "anal.hasnext", analHasnext); } break; case 'c': // "aac" switch (input[1]) { case '*': cmd_anal_calls (core, input + 1, true); break; // "aac*" default: cmd_anal_calls (core, input + 1, false); break; // "aac" } case 'j': cmd_anal_jumps (core, input + 1); break; // "aaj" case '*': // "aa*" r_core_cmd0 (core, "af @@ sym.*"); r_core_cmd0 (core, "af @@ entry*"); break; case 'd': // "aad" cmd_anal_aad (core, input); break; case 'v': // "aav" cmd_anal_aav (core, input); break; case 'u': // "aau" - print areas not covered by functions r_core_anal_nofunclist (core, input + 1); 
break; case 'i': // "aai" r_core_anal_info (core, input + 1); break; case 's': // "aas" r_core_cmd0 (core, "af @@= `isq~[0]`"); r_core_cmd0 (core, "af @@ entry*"); break; case 'n': // "aan" r_core_anal_autoname_all_fcns (core); break; //aan case 'p': // "aap" if (*input == '?') { // TODO: accept parameters for ranges eprintf ("Usage: /aap ; find in memory for function preludes"); } else { r_core_search_preludes (core); } break; case '\0': // "aa" case 'a': if (input[0] && (input[1] == '?' || (input[1] && input[2] == '?'))) { r_cons_println ("Usage: See aa? for more help"); } else { char *dh_orig = NULL; if (!strncmp (input, "aaaaa", 5)) { eprintf ("An r2 developer is coming to your place to manually analyze this program. Please wait for it\n"); if (r_config_get_i (core->config, "scr.interactive")) { r_cons_any_key (NULL); } goto jacuzzi; } ut64 curseek = core->offset; rowlog (core, "Analyze all flags starting with sym. and entry0 (aa)"); r_cons_break_push (NULL, NULL); r_cons_break_timeout (r_config_get_i (core->config, "anal.timeout")); r_core_anal_all (core); rowlog_done (core); dh_orig = core->dbg->h ? 
strdup (core->dbg->h->name) : strdup ("esil"); if (core->io && core->io->desc && core->io->desc->plugin && !core->io->desc->plugin->isdbg) { //use dh_origin if we are debugging R_FREE (dh_orig); } if (r_cons_is_breaked ()) { goto jacuzzi; } r_cons_clear_line (1); if (*input == 'a') { // "aaa" if (dh_orig && strcmp (dh_orig, "esil")) { r_core_cmd0 (core, "dL esil"); } int c = r_config_get_i (core->config, "anal.calls"); if (should_aav (core)) { rowlog (core, "\nAnalyze value pointers (aav)"); r_core_cmd0 (core, "aav"); if (r_cons_is_breaked ()) { goto jacuzzi; } } r_config_set_i (core->config, "anal.calls", 1); r_core_cmd0 (core, "s $S"); rowlog (core, "Analyze len bytes of instructions for references (aar)"); if (r_cons_is_breaked ()) { goto jacuzzi; } (void)r_core_anal_refs (core, ""); // "aar" rowlog_done (core); if (r_cons_is_breaked ()) { goto jacuzzi; } rowlog (core, "Analyze function calls (aac)"); (void) cmd_anal_calls (core, "", false); // "aac" r_core_seek (core, curseek, 1); // rowlog (core, "Analyze data refs as code (LEA)"); // (void) cmd_anal_aad (core, NULL); // "aad" rowlog_done (core); if (r_cons_is_breaked ()) { goto jacuzzi; } if (input[1] == 'a') { // "aaaa" bool ioCache = r_config_get_i (core->config, "io.pcache"); r_config_set_i (core->config, "io.pcache", 1); rowlog (core, "Emulate code to find computed references (aae)"); r_core_cmd0 (core, "aae $SS @ $S"); rowlog_done (core); rowlog (core, "Analyze consecutive function (aat)"); r_core_cmd0 (core, "aat"); rowlog_done (core); // drop cache writes is no cache was if (!ioCache) { r_core_cmd0 (core, "wc-*"); } r_config_set_i (core->config, "io.pcache", ioCache); } else { rowlog (core, "Use -AA or aaaa to perform additional experimental analysis."); rowlog_done (core); } r_config_set_i (core->config, "anal.calls", c); rowlog (core, "Constructing a function name for fcn.* and sym.func.* functions (aan)"); if (r_cons_is_breaked ()) { goto jacuzzi; } if (r_config_get_i (core->config, 
"anal.autoname")) { r_core_anal_autoname_all_fcns (core); rowlog_done (core); } if (input[1] == 'a') { // "aaaa" bool ioCache = r_config_get_i (core->config, "io.pcache"); r_config_set_i (core->config, "io.pcache", 1); if (sdb_count (core->anal->sdb_zigns) > 0) { rowlog (core, "Check for zignature from zigns folder (z/)"); r_core_cmd0 (core, "z/"); } rowlog (core, "Type matching analysis for all functions (afta)"); r_core_cmd0 (core, "afta"); rowlog_done (core); if (!ioCache) { r_core_cmd0 (core, "wc-*"); } r_config_set_i (core->config, "io.pcache", ioCache); } r_core_cmd0 (core, "s-"); if (dh_orig) { r_core_cmdf (core, "dL %s;dpa", dh_orig); } } r_core_seek (core, curseek, 1); jacuzzi: flag_every_function (core); r_cons_break_pop (); R_FREE (dh_orig); } break; case 't': { // "aat" ut64 cur = core->offset; bool hasnext = r_config_get_i (core->config, "anal.hasnext"); RListIter *iter; RIOMap* map; RList *list = r_core_get_boundaries_prot (core, R_IO_EXEC, NULL, "anal"); r_list_foreach (list, iter, map) { r_core_seek (core, map->itv.addr, 1); r_config_set_i (core->config, "anal.hasnext", 1); r_core_cmd0 (core, "afr"); r_config_set_i (core->config, "anal.hasnext", hasnext); } r_core_seek (core, cur, 1); break; } case 'T': // "aaT" cmd_anal_aftertraps (core, input + 1); break; case 'E': // "aaE" r_core_cmd0 (core, "aef @@f"); break; case 'e': // "aae" if (input[1]) { const char *len = (char *) input + 1; char *addr = strchr (input + 2, ' '); if (addr) { *addr++ = 0; } r_core_anal_esil (core, len, addr); } else { ut64 at = core->offset; RIOMap* map; RListIter *iter; RList *list = r_core_get_boundaries_prot (core, -1, NULL, "anal"); r_list_foreach (list, iter, map) { r_core_seek (core, map->itv.addr, 1); r_core_anal_esil (core, "$SS", NULL); } r_core_seek (core, at, 1); } break; case 'r': (void)r_core_anal_refs (core, input + 1); break; default: r_core_cmd_help (core, help_msg_aa); break; } return true; } static bool anal_fcn_data (RCore *core, const char *input) { 
RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, core->offset, -1); ut32 fcn_size = r_anal_fcn_size (fcn); if (fcn) { int i; bool gap = false; ut64 gap_addr = UT64_MAX; char *bitmap = calloc (1, fcn_size); if (bitmap) { RAnalBlock *b; RListIter *iter; r_list_foreach (fcn->bbs, iter, b) { int f = b->addr - fcn->addr; int t = R_MIN (f + b->size, fcn_size); if (f >= 0) { while (f < t) { bitmap[f++] = 1; } } } } for (i = 0; i < fcn_size; i++) { ut64 here = fcn->addr + i; if (bitmap && bitmap[i]) { if (gap) { r_cons_printf ("Cd %d @ 0x%08"PFMT64x"\n", here - gap_addr, gap_addr); gap = false; } gap_addr = UT64_MAX; } else { if (!gap) { gap = true; gap_addr = here; } } } if (gap) { r_cons_printf ("Cd %d @ 0x%08"PFMT64x"\n", fcn->addr + fcn_size - gap_addr, gap_addr); gap = false; } free (bitmap); return true; } return false; } static int cmpaddr (const void *_a, const void *_b) { const RAnalFunction *a = _a, *b = _b; return a->addr - b->addr; } static bool anal_fcn_data_gaps (RCore *core, const char *input) { ut64 end = UT64_MAX; RAnalFunction *fcn; RListIter *iter; int i, wordsize = (core->assembler->bits == 64)? 
8: 4; r_list_sort (core->anal->fcns, cmpaddr); r_list_foreach (core->anal->fcns, iter, fcn) { if (end != UT64_MAX) { int range = fcn->addr - end; if (range > 0) { for (i = 0; i + wordsize < range; i+= wordsize) { r_cons_printf ("Cd %d @ 0x%08"PFMT64x"\n", wordsize, end + i); } r_cons_printf ("Cd %d @ 0x%08"PFMT64x"\n", range - i, end + i); //r_cons_printf ("Cd %d @ 0x%08"PFMT64x"\n", range, end); } } end = fcn->addr + r_anal_fcn_size (fcn); } return true; } static void cmd_anal_rtti(RCore *core, const char *input) { switch (input[0]) { case '\0': // "avr" case 'j': // "avrj" r_anal_rtti_print_at_vtable (core->anal, core->offset, input[0]); break; case 'a': // "avra" r_anal_rtti_print_all (core->anal, input[1]); break; default : r_core_cmd_help (core, help_msg_av); break; } } static void cmd_anal_virtual_functions(RCore *core, const char* input) { switch (input[0]) { case '\0': // "av" case '*': // "av*" case 'j': // "avj" r_anal_list_vtables (core->anal, input[0]); break; case 'r': // "avr" cmd_anal_rtti (core, input + 1); break; default : r_core_cmd_help (core, help_msg_av); break; } } static int cmd_anal(void *data, const char *input) { const char *r; RCore *core = (RCore *)data; ut32 tbs = core->blocksize; switch (input[0]) { case 'p': // "ap" { const ut8 *prelude = (const ut8*)"\xe9\x2d"; //:fffff000"; const int prelude_sz = 2; const int bufsz = 4096; ut8 *buf = calloc (1, bufsz); ut64 off = core->offset; if (input[1] == ' ') { off = r_num_math (core->num, input+1); r_io_read_at (core->io, off - bufsz + prelude_sz, buf, bufsz); } else { r_io_read_at (core->io, off - bufsz + prelude_sz, buf, bufsz); } //const char *prelude = "\x2d\xe9\xf0\x47"; //:fffff000"; r_mem_reverse (buf, bufsz); //r_print_hexdump (NULL, off, buf, bufsz, 16, -16); const ut8 *pos = r_mem_mem (buf, bufsz, prelude, prelude_sz); if (pos) { int delta = (size_t)(pos - buf); eprintf ("POS = %d\n", delta); eprintf ("HIT = 0x%"PFMT64x"\n", off - delta); r_cons_printf ("0x%08"PFMT64x"\n", off - 
delta); } else { eprintf ("Cannot find prelude\n"); } free (buf); } break; case '8': { ut8 *buf = malloc (strlen (input) + 1); if (buf) { int len = r_hex_str2bin (input + 1, buf); if (len > 0) { core_anal_bytes (core, buf, len, 0, input[1]); } free (buf); } } break; case 'b': if (input[1] == 'b') { // "abb" core_anal_bbs (core, input + 2); } else if (input[1] == 'r') { // "abr" core_anal_bbs_range (core, input + 2); } else if (input[1] == ' ' || !input[1]) { // find block ut64 addr = core->offset; if (input[1]) { addr = r_num_math (core->num, input + 1); } r_core_cmdf (core, "afbi @ 0x%"PFMT64x, addr); } else { r_core_cmd_help (core, help_msg_ab); } break; case 'i': cmd_anal_info (core, input + 1); break; // "ai" case 'r': cmd_anal_reg (core, input + 1); break; // "ar" case 'e': cmd_anal_esil (core, input + 1); break; // "ae" case 'o': cmd_anal_opcode (core, input + 1); break; // "ao" case 'O': cmd_anal_bytes (core, input + 1); break; // "aO" case 'F': // "aF" r_core_anal_fcn (core, core->offset, UT64_MAX, R_ANAL_REF_TYPE_NULL, 1); break; case 'f': // "af" { int res = cmd_anal_fcn (core, input); if (!res) { return false; } } break; case 'n': // 'an' { const char *name = NULL; bool use_json = false; if (input[1] == 'j') { use_json = true; input++; } if (input[1] == ' ') { name = input + 1; while (name[0] == ' ') { name++; } char *end = strchr (name, ' '); if (end) { *end = '\0'; } if (*name == '\0') { name = NULL; } } cmd_an (core, use_json, name); } break; case 'g': // "ag" cmd_anal_graph (core, input + 1); break; case 's': // "as" cmd_anal_syscall (core, input + 1); break; case 'v': // "av" cmd_anal_virtual_functions (core, input + 1); break; case 'x': // "ax" if (!cmd_anal_refs (core, input + 1)) { return false; } break; case 'a': // "aa" if (!cmd_anal_all (core, input + 1)) return false; break; case 'c': // "ac" { RList *hooks; RListIter *iter; RAnalCycleHook *hook; char *instr_tmp = NULL; int ccl = input[1]? 
r_num_math (core->num, &input[2]): 0; //get cycles to look for int cr = r_config_get_i (core->config, "asm.cmt.right"); int fun = r_config_get_i (core->config, "asm.functions"); int li = r_config_get_i (core->config, "asm.lines"); int xr = r_config_get_i (core->config, "asm.xrefs"); r_config_set_i (core->config, "asm.cmt.right", true); r_config_set_i (core->config, "asm.functions", false); r_config_set_i (core->config, "asm.lines", false); r_config_set_i (core->config, "asm.xrefs", false); hooks = r_core_anal_cycles (core, ccl); //analyse r_cons_clear_line (1); r_list_foreach (hooks, iter, hook) { instr_tmp = r_core_disassemble_instr (core, hook->addr, 1); r_cons_printf ("After %4i cycles:\t%s", (ccl - hook->cycles), instr_tmp); r_cons_flush (); free (instr_tmp); } r_list_free (hooks); r_config_set_i (core->config, "asm.cmt.right", cr); //reset settings r_config_set_i (core->config, "asm.functions", fun); r_config_set_i (core->config, "asm.lines", li); r_config_set_i (core->config, "asm.xrefs", xr); } break; case 'd': // "ad" switch (input[1]) { case 'f': // "adf" if (input[2] == 'g') { anal_fcn_data_gaps (core, input + 1); } else { anal_fcn_data (core, input + 1); } break; case 't': // "adt" cmd_anal_trampoline (core, input + 2); break; case ' ': { // "ad" const int default_depth = 1; const char *p; int a, b; a = r_num_math (core->num, input + 2); p = strchr (input + 2, ' '); b = p? 
r_num_math (core->num, p + 1): default_depth; if (a < 1) { a = 1; } if (b < 1) { b = 1; } r_core_anal_data (core, core->offset, a, b, 0); } break; case 'k': // "adk" r = r_anal_data_kind (core->anal, core->offset, core->block, core->blocksize); r_cons_println (r); break; case '\0': // "ad" r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 0); break; case '4': // "ad4" r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 4); break; case '8': // "ad8" r_core_anal_data (core, core->offset, 2 + (core->blocksize / 4), 1, 8); break; default: r_core_cmd_help (core, help_msg_ad); break; } break; case 'h': // "ah" cmd_anal_hint (core, input + 1); break; case '!': // "a!" if (core->anal && core->anal->cur && core->anal->cur->cmd_ext) { return core->anal->cur->cmd_ext (core->anal, input + 1); } else { r_cons_printf ("No plugins for this analysis plugin\n"); } break; default: r_core_cmd_help (core, help_msg_a); #if 0 r_cons_printf ("Examples:\n" " f ts @ `S*~text:0[3]`; f t @ section..text\n" " f ds @ `S*~data:0[3]`; f d @ section..data\n" " .ad t t+ts @ d:ds\n", NULL); #endif break; } if (tbs != core->blocksize) { r_core_block_size (core, tbs); } if (r_cons_is_breaked ()) { r_cons_clear_line (1); } return 0; }
./CrossVul/dataset_final_sorted/CWE-416/c/good_148_1
crossvul-cpp_data_good_4494_0
/** * FreeRDP: A Remote Desktop Protocol Implementation * * Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2017 Armin Novak <armin.novak@thincast.com> * Copyright 2017 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <errno.h> #include <winpr/crt.h> #include <winpr/ssl.h> #include <winpr/wnd.h> #include <winpr/path.h> #include <winpr/cmdline.h> #include <winpr/winsock.h> #include <freerdp/log.h> #include <freerdp/version.h> #include <winpr/tools/makecert.h> #ifndef _WIN32 #include <sys/select.h> #include <signal.h> #endif #include "shadow.h" #define TAG SERVER_TAG("shadow") static const char bind_address[] = "bind-address,"; static const COMMAND_LINE_ARGUMENT_A shadow_args[] = { { "port", COMMAND_LINE_VALUE_REQUIRED, "<number>", NULL, NULL, -1, NULL, "Server port" }, { "ipc-socket", COMMAND_LINE_VALUE_REQUIRED, "<ipc-socket>", NULL, NULL, -1, NULL, "Server IPC socket" }, { "bind-address", COMMAND_LINE_VALUE_REQUIRED, "<bind-address>[,<another address>, ...]", NULL, NULL, -1, NULL, "An address to bind to. Use '[<ipv6>]' for IPv6 addresses, e.g. 
'[::1]' for " "localhost" }, { "monitors", COMMAND_LINE_VALUE_OPTIONAL, "<0,1,2...>", NULL, NULL, -1, NULL, "Select or list monitors" }, { "rect", COMMAND_LINE_VALUE_REQUIRED, "<x,y,w,h>", NULL, NULL, -1, NULL, "Select rectangle within monitor to share" }, { "auth", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueFalse, NULL, -1, NULL, "Clients must authenticate" }, { "may-view", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueTrue, NULL, -1, NULL, "Clients may view without prompt" }, { "may-interact", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueTrue, NULL, -1, NULL, "Clients may interact without prompt" }, { "sec", COMMAND_LINE_VALUE_REQUIRED, "<rdp|tls|nla|ext>", NULL, NULL, -1, NULL, "force specific protocol security" }, { "sec-rdp", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueTrue, NULL, -1, NULL, "rdp protocol security" }, { "sec-tls", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueTrue, NULL, -1, NULL, "tls protocol security" }, { "sec-nla", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueTrue, NULL, -1, NULL, "nla protocol security" }, { "sec-ext", COMMAND_LINE_VALUE_BOOL, NULL, BoolValueFalse, NULL, -1, NULL, "nla extended protocol security" }, { "sam-file", COMMAND_LINE_VALUE_REQUIRED, "<file>", NULL, NULL, -1, NULL, "NTLM SAM file for NLA authentication" }, { "version", COMMAND_LINE_VALUE_FLAG | COMMAND_LINE_PRINT_VERSION, NULL, NULL, NULL, -1, NULL, "Print version" }, { "buildconfig", COMMAND_LINE_VALUE_FLAG | COMMAND_LINE_PRINT_BUILDCONFIG, NULL, NULL, NULL, -1, NULL, "Print the build configuration" }, { "help", COMMAND_LINE_VALUE_FLAG | COMMAND_LINE_PRINT_HELP, NULL, NULL, NULL, -1, "?", "Print help" }, { NULL, 0, NULL, NULL, NULL, -1, NULL, NULL } }; static int shadow_server_print_command_line_help(int argc, char** argv) { char* str; size_t length; COMMAND_LINE_ARGUMENT_A* arg; COMMAND_LINE_ARGUMENT_A largs[ARRAYSIZE(shadow_args)]; memcpy(largs, shadow_args, sizeof(shadow_args)); if (argc < 1) return -1; WLog_INFO(TAG, "Usage: %s [options]", argv[0]); WLog_INFO(TAG, ""); WLog_INFO(TAG, 
"Syntax:");
	WLog_INFO(TAG, " /flag (enables flag)");
	WLog_INFO(TAG, " /option:<value> (specifies option with value)");
	WLog_INFO(TAG, " +toggle -toggle (enables or disables toggle, where '/' is a synonym of '+')");
	WLog_INFO(TAG, "");
	/* Walk the option table and print one usage entry per argument. */
	arg = largs;

	do
	{
		if (arg->Flags & COMMAND_LINE_VALUE_FLAG)
		{
			/* Plain flag: "/name" plus description. */
			WLog_INFO(TAG, " %s", "/");
			WLog_INFO(TAG, "%-20s", arg->Name);
			WLog_INFO(TAG, "\t%s", arg->Text);
		}
		else if ((arg->Flags & COMMAND_LINE_VALUE_REQUIRED) ||
		         (arg->Flags & COMMAND_LINE_VALUE_OPTIONAL))
		{
			WLog_INFO(TAG, " %s", "/");

			if (arg->Format)
			{
				/* "name:<format>" column, left-padded to 20 chars. */
				length = (strlen(arg->Name) + strlen(arg->Format) + 2);
				str = (char*)malloc(length + 1);

				if (!str)
					return -1;

				sprintf_s(str, length + 1, "%s:%s", arg->Name, arg->Format);
				WLog_INFO(TAG, "%-20s", str);
				free(str);
			}
			else
			{
				WLog_INFO(TAG, "%-20s", arg->Name);
			}

			WLog_INFO(TAG, "\t%s", arg->Text);
		}
		else if (arg->Flags & COMMAND_LINE_VALUE_BOOL)
		{
			/* Toggle: show the default state ("on"/"off") next to the name. */
			length = strlen(arg->Name) + 32;
			str = (char*)malloc(length + 1);

			if (!str)
				return -1;

			sprintf_s(str, length + 1, "%s (default:%s)", arg->Name, arg->Default ? "on" : "off");
			WLog_INFO(TAG, " %s", arg->Default ?
			"-" : "+");
			WLog_INFO(TAG, "%-20s", str);
			free(str);
			WLog_INFO(TAG, "\t%s", arg->Text);
		}
	} while ((arg = CommandLineFindNextArgumentA(arg)) != NULL);

	return 1;
}

/*
 * Translate a CommandLineParseArgumentsA() status into the action it implies:
 * print version / build config / help, or pass 1 through for "keep going".
 * Any negative status triggers the help text.
 */
int shadow_server_command_line_status_print(rdpShadowServer* server, int argc, char** argv,
                                            int status)
{
	WINPR_UNUSED(server);

	if (status == COMMAND_LINE_STATUS_PRINT_VERSION)
	{
		WLog_INFO(TAG, "FreeRDP version %s (git %s)", FREERDP_VERSION_FULL, GIT_REVISION);
		return COMMAND_LINE_STATUS_PRINT_VERSION;
	}
	else if (status == COMMAND_LINE_STATUS_PRINT_BUILDCONFIG)
	{
		WLog_INFO(TAG, "%s", freerdp_get_build_config());
		return COMMAND_LINE_STATUS_PRINT_BUILDCONFIG;
	}
	else if (status == COMMAND_LINE_STATUS_PRINT)
	{
		return COMMAND_LINE_STATUS_PRINT;
	}
	else if (status < 0)
	{
		if (shadow_server_print_command_line_help(argc, argv) < 0)
			return -1;

		return COMMAND_LINE_STATUS_PRINT_HELP;
	}

	return 1;
}

/*
 * Parse the shadow server command line into @server / @server->settings.
 * Returns the parser status (negative on error, 1 on success).
 */
int shadow_server_parse_command_line(rdpShadowServer* server, int argc, char** argv)
{
	int status;
	DWORD flags;
	COMMAND_LINE_ARGUMENT_A* arg;
	rdpSettings* settings = server->settings;
	/* Parse into a local copy of the option table so repeated calls do not
	 * see stale COMMAND_LINE_ARGUMENT_PRESENT flags in the shared table. */
	COMMAND_LINE_ARGUMENT_A largs[ARRAYSIZE(shadow_args)];
	memcpy(largs, shadow_args, sizeof(shadow_args));

	if (argc < 2)
		return 1;

	CommandLineClearArgumentsA(largs);
	flags = COMMAND_LINE_SEPARATOR_COLON;
	flags |= COMMAND_LINE_SIGIL_SLASH | COMMAND_LINE_SIGIL_PLUS_MINUS;
	status = CommandLineParseArgumentsA(argc, argv, largs, flags, server, NULL, NULL);

	if (status < 0)
		return status;

	arg = largs;
	errno = 0;

	do
	{
		if (!(arg->Flags & COMMAND_LINE_ARGUMENT_PRESENT))
			continue;

		CommandLineSwitchStart(arg) CommandLineSwitchCase(arg, "port")
		{
			long val = strtol(arg->Value, NULL, 0);

			if ((errno != 0) || (val <= 0) || (val > UINT16_MAX))
				return -1;

			server->port = (DWORD)val;
		}
		CommandLineSwitchCase(arg, "ipc-socket")
		{
			/* /bind-address is incompatible */
			if (server->ipcSocket)
				return -1;

			server->ipcSocket = _strdup(arg->Value);

			if (!server->ipcSocket)
				return -1;
		}
		CommandLineSwitchCase(arg, "bind-address")
		{
			int rc;
			size_t len = strlen(arg->Value) +
sizeof(bind_address);
			/* /ipc-socket is incompatible */
			if (server->ipcSocket)
				return -1;

			/* Stored as "bind-address,<value>" so shadow_server_start() can
			 * tell a TCP bind request apart from a local (IPC) socket path. */
			server->ipcSocket = calloc(len, sizeof(CHAR));

			if (!server->ipcSocket)
				return -1;

			rc = _snprintf(server->ipcSocket, len, "%s%s", bind_address, arg->Value);

			if ((rc < 0) || ((size_t)rc != len - 1))
				return -1;
		}
		CommandLineSwitchCase(arg, "may-view")
		{
			server->mayView = arg->Value ? TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "may-interact")
		{
			server->mayInteract = arg->Value ? TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "rect")
		{
			/* Parse "<x>,<y>,<w>,<h>" into the shared sub-rectangle. */
			char* p;
			char* tok[4];
			long x = -1, y = -1, w = -1, h = -1;
			char* str = _strdup(arg->Value);

			if (!str)
				return -1;

			tok[0] = p = str;
			p = strchr(p + 1, ',');

			if (!p)
			{
				free(str);
				return -1;
			}

			*p++ = '\0';
			tok[1] = p;
			p = strchr(p + 1, ',');

			if (!p)
			{
				free(str);
				return -1;
			}

			*p++ = '\0';
			tok[2] = p;
			p = strchr(p + 1, ',');

			if (!p)
			{
				free(str);
				return -1;
			}

			*p++ = '\0';
			tok[3] = p;
			/* NOTE(review): strtol errors are detected via errno only; a
			 * non-numeric token yields 0 without setting errno and is caught
			 * (if at all) by the range checks below — confirm acceptable. */
			x = strtol(tok[0], NULL, 0);

			if (errno != 0)
				goto fail;

			y = strtol(tok[1], NULL, 0);

			if (errno != 0)
				goto fail;

			w = strtol(tok[2], NULL, 0);

			if (errno != 0)
				goto fail;

			h = strtol(tok[3], NULL, 0);

			if (errno != 0)
				goto fail;

		fail:
			free(str);

			if ((x < 0) || (y < 0) || (w < 1) || (h < 1) || (errno != 0))
				return -1;

			server->subRect.left = x;
			server->subRect.top = y;
			server->subRect.right = x + w;
			server->subRect.bottom = y + h;
			server->shareSubRect = TRUE;
		}
		CommandLineSwitchCase(arg, "auth")
		{
			server->authentication = arg->Value ?
			TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "sec")
		{
			/* Force exactly one protocol security layer. */
			if (strcmp("rdp", arg->Value) == 0) /* Standard RDP */
			{
				settings->RdpSecurity = TRUE;
				settings->TlsSecurity = FALSE;
				settings->NlaSecurity = FALSE;
				settings->ExtSecurity = FALSE;
				settings->UseRdpSecurityLayer = TRUE;
			}
			else if (strcmp("tls", arg->Value) == 0) /* TLS */
			{
				settings->RdpSecurity = FALSE;
				settings->TlsSecurity = TRUE;
				settings->NlaSecurity = FALSE;
				settings->ExtSecurity = FALSE;
			}
			else if (strcmp("nla", arg->Value) == 0) /* NLA */
			{
				settings->RdpSecurity = FALSE;
				settings->TlsSecurity = FALSE;
				settings->NlaSecurity = TRUE;
				settings->ExtSecurity = FALSE;
			}
			else if (strcmp("ext", arg->Value) == 0) /* NLA Extended */
			{
				settings->RdpSecurity = FALSE;
				settings->TlsSecurity = FALSE;
				settings->NlaSecurity = FALSE;
				settings->ExtSecurity = TRUE;
			}
			else
			{
				/* NOTE(review): unknown values are only logged, not rejected
				 * (parsing continues) — confirm intended. */
				WLog_ERR(TAG, "unknown protocol security: %s", arg->Value);
			}
		}
		CommandLineSwitchCase(arg, "sec-rdp")
		{
			settings->RdpSecurity = arg->Value ? TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "sec-tls")
		{
			settings->TlsSecurity = arg->Value ? TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "sec-nla")
		{
			settings->NlaSecurity = arg->Value ? TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "sec-ext")
		{
			settings->ExtSecurity = arg->Value ?
			TRUE : FALSE;
		}
		CommandLineSwitchCase(arg, "sam-file")
		{
			freerdp_settings_set_string(settings, FreeRDP_NtlmSamFile, arg->Value);
		}
		CommandLineSwitchDefault(arg)
		{
		}
		CommandLineSwitchEnd(arg)
	} while ((arg = CommandLineFindNextArgumentA(arg)) != NULL);

	/* "monitors" is handled after the main loop: with a value it selects a
	 * monitor, without one it lists them and forces a print-only status. */
	arg = CommandLineFindArgumentA(largs, "monitors");

	if (arg && (arg->Flags & COMMAND_LINE_ARGUMENT_PRESENT))
	{
		int index;
		int numMonitors;
		MONITOR_DEF monitors[16];
		numMonitors = shadow_enum_monitors(monitors, 16);

		if (arg->Flags & COMMAND_LINE_VALUE_PRESENT)
		{
			/* Select monitors */
			long val = strtol(arg->Value, NULL, 0);

			/* NOTE(review): on an out-of-range index only the status is set
			 * to PRINT; selectedMonitor is still assigned the bad value —
			 * confirm intended. */
			if ((val < 0) || (errno != 0) || (val >= numMonitors))
				status = COMMAND_LINE_STATUS_PRINT;

			server->selectedMonitor = val;
		}
		else
		{
			int width, height;
			MONITOR_DEF* monitor;

			/* List monitors */
			for (index = 0; index < numMonitors; index++)
			{
				monitor = &monitors[index];
				width = monitor->right - monitor->left;
				height = monitor->bottom - monitor->top;
				WLog_INFO(TAG, " %s [%d] %dx%d\t+%" PRId32 "+%" PRId32 "",
				          (monitor->flags == 1) ?
"*" : " ", index, width, height, monitor->left, monitor->top); } status = COMMAND_LINE_STATUS_PRINT; } } return status; } static DWORD WINAPI shadow_server_thread(LPVOID arg) { rdpShadowServer* server = (rdpShadowServer*)arg; BOOL running = TRUE; DWORD status; freerdp_listener* listener = server->listener; shadow_subsystem_start(server->subsystem); while (running) { HANDLE events[32]; DWORD nCount = 0; events[nCount++] = server->StopEvent; nCount += listener->GetEventHandles(listener, &events[nCount], 32 - nCount); if (nCount <= 1) { WLog_ERR(TAG, "Failed to get FreeRDP file descriptor"); break; } status = WaitForMultipleObjects(nCount, events, FALSE, INFINITE); switch (status) { case WAIT_FAILED: case WAIT_OBJECT_0: running = FALSE; break; default: { if (!listener->CheckFileDescriptor(listener)) { WLog_ERR(TAG, "Failed to check FreeRDP file descriptor"); running = FALSE; } else { #ifdef _WIN32 Sleep(100); /* FIXME: listener event handles */ #endif } } break; } } listener->Close(listener); shadow_subsystem_stop(server->subsystem); /* Signal to the clients that server is being stopped and wait for them * to disconnect. */ if (shadow_client_boardcast_quit(server, 0)) { while (ArrayList_Count(server->clients) > 0) { Sleep(100); } } ExitThread(0); return 0; } static BOOL open_port(rdpShadowServer* server, char* address) { BOOL status; char* modaddr = address; if (modaddr) { if (modaddr[0] == '[') { char* end = strchr(address, ']'); if (!end) { WLog_ERR(TAG, "Could not parse bind-address %s", address); return -1; } *end++ = '\0'; if (strlen(end) > 0) { WLog_ERR(TAG, "Excess data after IPv6 address: '%s'", end); return -1; } modaddr++; } } status = server->listener->Open(server->listener, modaddr, (UINT16)server->port); if (!status) { WLog_ERR(TAG, "Problem creating TCP listener. 
(Port already used or insufficient permissions?)"); } return status; } int shadow_server_start(rdpShadowServer* server) { BOOL ipc; BOOL status; WSADATA wsaData; if (!server) return -1; if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) return -1; #ifndef _WIN32 signal(SIGPIPE, SIG_IGN); #endif server->screen = shadow_screen_new(server); if (!server->screen) { WLog_ERR(TAG, "screen_new failed"); return -1; } server->capture = shadow_capture_new(server); if (!server->capture) { WLog_ERR(TAG, "capture_new failed"); return -1; } /* Bind magic: * * emtpy ... bind TCP all * <local path> ... bind local (IPC) * bind-socket,<address> ... bind TCP to specified interface */ ipc = server->ipcSocket && (strncmp(bind_address, server->ipcSocket, strnlen(bind_address, sizeof(bind_address))) != 0); if (!ipc) { size_t x, count; char** list = CommandLineParseCommaSeparatedValuesEx(NULL, server->ipcSocket, &count); if (!list || (count <= 1)) { if (server->ipcSocket == NULL) { if (!open_port(server, NULL)) { free(list); return -1; } } else { free(list); return -1; } } for (x = 1; x < count; x++) { BOOL success = open_port(server, list[x]); if (!success) { free(list); return -1; } } free(list); } else { status = server->listener->OpenLocal(server->listener, server->ipcSocket); if (!status) { WLog_ERR(TAG, "Problem creating local socket listener. 
(Port already used or " "insufficient permissions?)"); return -1; } } if (!(server->thread = CreateThread(NULL, 0, shadow_server_thread, (void*)server, 0, NULL))) { return -1; } return 0; } int shadow_server_stop(rdpShadowServer* server) { if (!server) return -1; if (server->thread) { SetEvent(server->StopEvent); WaitForSingleObject(server->thread, INFINITE); CloseHandle(server->thread); server->thread = NULL; server->listener->Close(server->listener); } if (server->screen) { shadow_screen_free(server->screen); server->screen = NULL; } if (server->capture) { shadow_capture_free(server->capture); server->capture = NULL; } return 0; } static int shadow_server_init_config_path(rdpShadowServer* server) { #ifdef _WIN32 if (!server->ConfigPath) { server->ConfigPath = GetEnvironmentSubPath("LOCALAPPDATA", "freerdp"); } #endif #ifdef __APPLE__ if (!server->ConfigPath) { char* userLibraryPath; char* userApplicationSupportPath; userLibraryPath = GetKnownSubPath(KNOWN_PATH_HOME, "Library"); if (userLibraryPath) { if (!PathFileExistsA(userLibraryPath) && !PathMakePathA(userLibraryPath, 0)) { WLog_ERR(TAG, "Failed to create directory '%s'", userLibraryPath); free(userLibraryPath); return -1; } userApplicationSupportPath = GetCombinedPath(userLibraryPath, "Application Support"); if (userApplicationSupportPath) { if (!PathFileExistsA(userApplicationSupportPath) && !PathMakePathA(userApplicationSupportPath, 0)) { WLog_ERR(TAG, "Failed to create directory '%s'", userApplicationSupportPath); free(userLibraryPath); free(userApplicationSupportPath); return -1; } server->ConfigPath = GetCombinedPath(userApplicationSupportPath, "freerdp"); } free(userLibraryPath); free(userApplicationSupportPath); } } #endif if (!server->ConfigPath) { char* configHome; configHome = GetKnownPath(KNOWN_PATH_XDG_CONFIG_HOME); if (configHome) { if (!PathFileExistsA(configHome) && !PathMakePathA(configHome, 0)) { WLog_ERR(TAG, "Failed to create directory '%s'", configHome); free(configHome); return -1; } 
server->ConfigPath = GetKnownSubPath(KNOWN_PATH_XDG_CONFIG_HOME, "freerdp");
			free(configHome);
		}
	}

	if (!server->ConfigPath)
		return -1; /* no usable config path */

	return 1;
}

/*
 * Ensure <ConfigPath>/shadow exists and contains shadow.crt/shadow.key;
 * when either file is missing, a self-signed pair is generated via the
 * bundled makecert tool. Returns TRUE on success.
 */
static BOOL shadow_server_init_certificate(rdpShadowServer* server)
{
	char* filepath;
	MAKECERT_CONTEXT* makecert = NULL;
	BOOL ret = FALSE;
	const char* makecert_argv[6] = { "makecert", "-rdp", "-live", "-silent", "-y", "5" };
	int makecert_argc = (sizeof(makecert_argv) / sizeof(char*));

	if (!PathFileExistsA(server->ConfigPath) && !PathMakePathA(server->ConfigPath, 0))
	{
		WLog_ERR(TAG, "Failed to create directory '%s'", server->ConfigPath);
		return FALSE;
	}

	if (!(filepath = GetCombinedPath(server->ConfigPath, "shadow")))
		return FALSE;

	if (!PathFileExistsA(filepath) && !PathMakePathA(filepath, 0))
	{
		/* NOTE(review): retries with CreateDirectoryA after PathMakePathA
		 * already failed — confirm this second attempt is intentional. */
		if (!CreateDirectoryA(filepath, 0))
		{
			WLog_ERR(TAG, "Failed to create directory '%s'", filepath);
			goto out_fail;
		}
	}

	server->CertificateFile = GetCombinedPath(filepath, "shadow.crt");
	server->PrivateKeyFile = GetCombinedPath(filepath, "shadow.key");

	if (!server->CertificateFile || !server->PrivateKeyFile)
		goto out_fail;

	if ((!PathFileExistsA(server->CertificateFile)) || (!PathFileExistsA(server->PrivateKeyFile)))
	{
		makecert = makecert_context_new();

		if (!makecert)
			goto out_fail;

		if (makecert_context_process(makecert, makecert_argc, (char**)makecert_argv) < 0)
			goto out_fail;

		if (makecert_context_set_output_file_name(makecert, "shadow") != 1)
			goto out_fail;

		/* Only write the files that are actually missing. */
		if (!PathFileExistsA(server->CertificateFile))
		{
			if (makecert_context_output_certificate_file(makecert, filepath) != 1)
				goto out_fail;
		}

		if (!PathFileExistsA(server->PrivateKeyFile))
		{
			if (makecert_context_output_private_key_file(makecert, filepath) != 1)
				goto out_fail;
		}
	}

	ret = TRUE;
out_fail:
	makecert_context_free(makecert);
	free(filepath);
	return ret;
}

/*
 * One-time server initialization: SSL, WTS API, client list, stop event,
 * lock, config path, certificate, listener and capture subsystem. On any
 * failure the goto chain unwinds everything acquired so far.
 * Returns >= 0 on success, -1 on failure.
 */
int shadow_server_init(rdpShadowServer* server)
{
	int status;
	winpr_InitializeSSL(WINPR_SSL_INIT_DEFAULT);
	WTSRegisterWtsApiFunctionTable(FreeRDP_InitWtsApi());

	if (!(server->clients =
	          ArrayList_New(TRUE)))
		goto fail_client_array;

	if (!(server->StopEvent = CreateEvent(NULL, TRUE, FALSE, NULL)))
		goto fail_stop_event;

	if (!InitializeCriticalSectionAndSpinCount(&(server->lock), 4000))
		goto fail_server_lock;

	status = shadow_server_init_config_path(server);

	if (status < 0)
		goto fail_config_path;

	status = shadow_server_init_certificate(server);

	if (status < 0)
		goto fail_certificate;

	server->listener = freerdp_listener_new();

	if (!server->listener)
		goto fail_listener;

	server->listener->info = (void*)server;
	server->listener->PeerAccepted = shadow_client_accepted;
	server->subsystem = shadow_subsystem_new();

	if (!server->subsystem)
		goto fail_subsystem_new;

	status = shadow_subsystem_init(server->subsystem, server);

	if (status >= 0)
		return status;

	/* Error unwind: release resources in reverse order of acquisition. */
	shadow_subsystem_free(server->subsystem);
fail_subsystem_new:
	freerdp_listener_free(server->listener);
	server->listener = NULL;
fail_listener:
	free(server->CertificateFile);
	server->CertificateFile = NULL;
	free(server->PrivateKeyFile);
	server->PrivateKeyFile = NULL;
fail_certificate:
	free(server->ConfigPath);
	server->ConfigPath = NULL;
fail_config_path:
	DeleteCriticalSection(&(server->lock));
fail_server_lock:
	CloseHandle(server->StopEvent);
	server->StopEvent = NULL;
fail_stop_event:
	ArrayList_Free(server->clients);
	server->clients = NULL;
fail_client_array:
	WLog_ERR(TAG, "Failed to initialize shadow server");
	return -1;
}

/*
 * Tear down everything shadow_server_init() set up (stopping the server
 * first). Returns 1 on success, -1 if @server is NULL.
 */
int shadow_server_uninit(rdpShadowServer* server)
{
	if (!server)
		return -1;

	shadow_server_stop(server);
	shadow_subsystem_uninit(server->subsystem);
	shadow_subsystem_free(server->subsystem);
	freerdp_listener_free(server->listener);
	server->listener = NULL;
	free(server->CertificateFile);
	server->CertificateFile = NULL;
	free(server->PrivateKeyFile);
	server->PrivateKeyFile = NULL;
	free(server->ConfigPath);
	server->ConfigPath = NULL;
	DeleteCriticalSection(&(server->lock));
	CloseHandle(server->StopEvent);
	server->StopEvent = NULL;
	ArrayList_Free(server->clients);
	server->clients = NULL;
	return 1;
}
rdpShadowServer* shadow_server_new(void) { rdpShadowServer* server; server = (rdpShadowServer*)calloc(1, sizeof(rdpShadowServer)); if (!server) return NULL; server->port = 3389; server->mayView = TRUE; server->mayInteract = TRUE; server->rfxMode = RLGR3; server->h264RateControlMode = H264_RATECONTROL_VBR; server->h264BitRate = 10000000; server->h264FrameRate = 30; server->h264QP = 0; server->authentication = FALSE; server->settings = freerdp_settings_new(FREERDP_SETTINGS_SERVER_MODE); return server; } void shadow_server_free(rdpShadowServer* server) { if (!server) return; free(server->ipcSocket); server->ipcSocket = NULL; freerdp_settings_free(server->settings); server->settings = NULL; free(server); }
./CrossVul/dataset_final_sorted/CWE-416/c/good_4494_0
crossvul-cpp_data_good_820_5
// SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ bool ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. 
*/ bool __read_mostly tracing_selftest_disabled; /* Pipe tracepoints to printk */ struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ static DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. 
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. 
*/ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static int __init set_cmdline_ftrace(char *str) { strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = true; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { allocate_snapshot = true; /* We also need the main ring buffer expanded */ ring_buffer_expanded = true; return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 0; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 0; } __setup("trace_clock=", set_trace_boot_clock); static int __init 
set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;

	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

/* Convert nanoseconds to microseconds, rounding to nearest. */
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

/* All trace_arrays (top level and instances); protected by trace_types_lock. */
LIST_HEAD(ftrace_trace_arrays);

/*
 * Take a reference on @this_tr, but only if it is still on the
 * ftrace_trace_arrays list (i.e. has not been removed concurrently).
 * Returns 0 on success, -ENODEV if the trace_array no longer exists.
 */
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

/* Drop a reference; caller must hold trace_types_lock. */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/* Locked counterpart of __trace_array_put(). */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

/*
 * Discard @event from @buffer when @call has a filter attached and @rec
 * does not match it. Returns 1 if the event was discarded, 0 otherwise.
 */
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

/* Free a pid list and its bitmap (the bitmap is vmalloc'ed). */
void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
kfree(pid_list); } /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { /* * If pid_max changed after filtered_pids was created, we * by default ignore all pids greater than the previous pid_max. */ if (search_pid >= filtered_pids->pid_max) return false; return test_bit(search_pid, filtered_pids->pids); } /** * trace_ignore_this_task - should a task be ignored for tracing * @filtered_pids: The list of pids to check * @task: The task that should be ignored if not filtered * * Checks if @task should be traced or not from @filtered_pids. * Returns true if @task should *NOT* be traced. * Returns false if @task should be traced. */ bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) { /* * Return false, because if filtered_pids does not exist, * all pids are good to trace. */ if (!filtered_pids) return false; return !trace_find_filtered_pid(filtered_pids, task->pid); } /** * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list * @pid_list: The list to modify * @self: The current task for fork or NULL for exit * @task: The task to add or remove * * If adding a task, if @self is defined, the task is only added if @self * is also included in @pid_list. This happens on fork and tasks should * only be added when the parent is listed. If @self is NULL, then the * @task pid will be removed from the list, which would happen on exit * of a task. 
*/ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* Sorry, but we don't support pid_max changing after setting */ if (task->pid >= pid_list->pid_max) return; /* "self" is set for forks, and NULL for exits */ if (self) set_bit(task->pid, pid_list->pids); else clear_bit(task->pid, pid_list->pids); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { unsigned long pid = (unsigned long)v; (*pos)++; /* pid already is +1 of the actual prevous bit */ pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); /* Return pid + 1 to allow zero to be represented */ if (pid < pid_list->pid_max) return (void *)(pid + 1); return NULL; } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. 
*/ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; loff_t l = 0; pid = find_first_bit(pid_list->pids, pid_list->pid_max); if (pid >= pid_list->pid_max) return NULL; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret = 0; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. 
*/ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); if (!pid_list) return -ENOMEM; pid_list->pid_max = READ_ONCE(pid_max); /* Only truncating will shrink pid_max */ if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) pid_list->pid_max = filtered_pids->pid_max; pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); if (!pid_list->pids) { kfree(pid_list); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ for_each_set_bit(pid, filtered_pids->pids, filtered_pids->pid_max) { set_bit(pid, pid_list->pids); nr_pids++; } } while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0 || !trace_parser_loaded(&parser)) break; read += ret; ubuf += ret; cnt -= ret; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; if (val >= pid_list->pid_max) break; pid = (pid_t)val; set_bit(pid, pid_list->pids); nr_pids++; trace_parser_clear(&parser); ret = 0; } trace_parser_put(&parser); if (ret < 0) { trace_free_pid_list(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_free_pid_list(pid_list); read = ret; pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer, cpu); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.trace_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been disabled * * Shows if the global trace has been enabled or not. It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. 
*/ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewrited * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. 
*/ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned long flags, int pc) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, flags, pc); ent->type 
= type; } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, flags, pc); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->trace_buffer.buffer) ring_buffer_record_on(tr->trace_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. */ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); } else ring_buffer_unlock_commit(buffer, event); } /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. 
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	/* Nothing to do unless the printk trace option is enabled */
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/*
	 * NOTE(review): assumes size >= 1 — entry->buf[size - 1] below would
	 * index before the buffer for size == 0. Callers are expected to pass
	 * strlen() of a non-empty literal; confirm at call sites.
	 */
	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	 The address of the caller
 * @str: The constant string to write to the buffer to
 *
 * Unlike __trace_puts(), only the pointer is recorded (the string must
 * therefore remain valid for the life of the buffer). Returns 1 on
 * success, 0 if the event could not be recorded.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct
trace_array *tr, void *cond_data) { struct tracer *tracer = tr->current_trace; unsigned long flags; if (in_nmi()) { internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); internal_trace_puts("*** snapshot is being ignored ***\n"); return; } if (!tr->allocated_snapshot) { internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n"); internal_trace_puts("*** stopping trace here! ***\n"); tracing_off(); return; } /* Note, snapshot can not be used when the tracer uses it */ if (tracer->use_max_tr) { internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n"); internal_trace_puts("*** Can not use snapshot (sorry) ***\n"); return; } local_irq_save(flags); update_max_tr(tr, current, smp_processor_id(), cond_data); local_irq_restore(flags); } void tracing_snapshot_instance(struct trace_array *tr) { tracing_snapshot_instance_cond(tr, NULL); } /** * tracing_snapshot - take a snapshot of the current buffer. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. * * Note, make sure to allocate the snapshot with either * a tracing_snapshot_alloc(), or by doing it manually * with: echo 1 > /sys/kernel/debug/tracing/snapshot * * If the snapshot buffer is not allocated, it will stop tracing. * Basically making a permanent snapshot. */ void tracing_snapshot(void) { struct trace_array *tr = &global_trace; tracing_snapshot_instance(tr); } EXPORT_SYMBOL_GPL(tracing_snapshot); /** * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 
* @tr: The tracing instance to snapshot * @cond_data: The data to be tested conditionally, and possibly saved * * This is the same as tracing_snapshot() except that the snapshot is * conditional - the snapshot will only happen if the * cond_snapshot.update() implementation receiving the cond_data * returns true, which means that the trace array's cond_snapshot * update() operation used the cond_data to determine whether the * snapshot should be taken, and if it was, presumably saved it along * with the snapshot. */ void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { tracing_snapshot_instance_cond(tr, cond_data); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); /** * tracing_snapshot_cond_data - get the user data associated with a snapshot * @tr: The tracing instance * * When the user enables a conditional snapshot using * tracing_snapshot_cond_enable(), the user-defined cond_data is saved * with the snapshot. This accessor is used to retrieve it. * * Should not be called from cond_snapshot.update(), since it takes * the tr->max_lock lock, which the code calling * cond_snapshot.update() has already done. * * Returns the cond_data associated with the trace array's snapshot. 
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	/* max_lock guards cond_snapshot against concurrent enable/disable */
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

/*
 * Allocate the snapshot (max) buffer for @tr if it is not already
 * allocated, sizing it to match the live trace buffer. Returns 0 on
 * success or a negative errno from the resize.
 */
int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. instead, resize it because
	 * The max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
* * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. */ void tracing_snapshot_alloc(void) { int ret; ret = tracing_alloc_snapshot(); if (ret < 0) return; tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); /** * tracing_snapshot_cond_enable - enable conditional snapshot for an instance * @tr: The tracing instance * @cond_data: User data to associate with the snapshot * @update: Implementation of the cond_snapshot update function * * Check whether the conditional snapshot for the given instance has * already been enabled, or if the current tracer is already using a * snapshot; if so, return -EBUSY, else create a cond_snapshot and * save the cond_data and update function inside. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { struct cond_snapshot *cond_snapshot; int ret = 0; cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); if (!cond_snapshot) return -ENOMEM; cond_snapshot->cond_data = cond_data; cond_snapshot->update = update; mutex_lock(&trace_types_lock); ret = tracing_alloc_snapshot_instance(tr); if (ret) goto fail_unlock; if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto fail_unlock; } /* * The cond_snapshot can only change to NULL without the * trace_types_lock. We don't care if we race with it going * to NULL, but we want to make sure that it's not set to * something other than NULL when we get here, which we can * do safely with only holding the trace_types_lock and not * having to take the max_lock. 
*/ if (tr->cond_snapshot) { ret = -EBUSY; goto fail_unlock; } arch_spin_lock(&tr->max_lock); tr->cond_snapshot = cond_snapshot; arch_spin_unlock(&tr->max_lock); mutex_unlock(&trace_types_lock); return ret; fail_unlock: mutex_unlock(&trace_types_lock); kfree(cond_snapshot); return ret; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); /** * tracing_snapshot_cond_disable - disable conditional snapshot for an instance * @tr: The tracing instance * * Check whether the conditional snapshot for the given instance is * enabled; if so, free the cond_snapshot associated with it, * otherwise return -EINVAL. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_disable(struct trace_array *tr) { int ret = 0; arch_spin_lock(&tr->max_lock); if (!tr->cond_snapshot) ret = -EINVAL; else { kfree(tr->cond_snapshot); tr->cond_snapshot = NULL; } arch_spin_unlock(&tr->max_lock); return ret; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); #else void tracing_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot); void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); int tracing_alloc_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); void tracing_snapshot_alloc(void) { /* Give warning */ tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); void *tracing_cond_snapshot_data(struct trace_array *tr) { return NULL; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); int tracing_snapshot_cond_disable(struct trace_array *tr) { return false; } 
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

/* Stop recording into @tr's ring buffer and mirror that in buffer_disabled */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

/*
 * Turn tracing off if the __disable_trace_on_warning knob is set.
 * NOTE(review): presumably invoked from the WARN()/oops path so the
 * buffer preserves the events leading up to the warning — confirm at
 * the (out-of-view) call sites.
 */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	/* Ask the ring buffer itself when it exists; fall back to the mirror */
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

/* Parse the trace_buf_size= boot argument (suffixes handled by memparse) */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

/* Parse the tracing_thresh= boot argument; value is given in usecs */
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	/* tracing_thresh is kept in nanoseconds */
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds?
*/ } trace_clocks[] = { { trace_clock_local, "local", 1 }, { trace_clock_global, "global", 1 }, { trace_clock_counter, "counter", 0 }, { trace_clock_jiffies, "uptime", 0 }, { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, { ktime_get_boot_fast_ns, "boot", 1 }, ARCH_TRACE_CLOCKS }; bool trace_clock_in_ns(struct trace_array *tr) { if (trace_clocks[tr->clock_id].in_ns) return true; return false; } /* * trace_parser_get_init - gets the buffer for trace parser */ int trace_parser_get_init(struct trace_parser *parser, int size) { memset(parser, 0, sizeof(*parser)); parser->buffer = kmalloc(size, GFP_KERNEL); if (!parser->buffer) return 1; parser->size = size; return 0; } /* * trace_parser_put - frees the buffer for trace parser */ void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); parser->buffer = NULL; } /* * trace_get_user - reads the user input string separated by space * (matched by isspace(ch)) * * For each string found the 'struct trace_parser' is updated, * and the function returns. * * Returns number of bytes read. * * See kernel/trace/trace.h for 'struct trace_parser' details. */ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. 
*/ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } parser->idx = 0; /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; ret = read; goto out; } } /* read the non-space input */ while (cnt && !isspace(ch) && ch) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto out; } ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* We either got finished input or we have to wait for another call. */ if (isspace(ch) || !ch) { parser->buffer[parser->idx] = 0; parser->cont = false; } else if (parser->idx < parser->size - 1) { parser->cont = true; parser->buffer[parser->idx++] = ch; /* Make sure the parsed string always terminates with '\0'. */ parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; goto out; } *ppos += read; ret = read; out: return ret; } /* TODO add a seq_buf_to_buffer() */ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; if (trace_seq_used(s) <= s->seq.readpos) return -EBUSY; len = trace_seq_used(s) - s->seq.readpos; if (cnt > len) cnt = len; memcpy(buf, s->buffer + s->seq.readpos, cnt); s->seq.readpos += cnt; return cnt; } unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE /* * Copy the new maximum trace into the separate maximum-trace * structure. 
(this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/tracing/tracing_max_latency) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct trace_buffer *trace_buf = &tr->trace_buffer; struct trace_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); max_buf->cpu = cpu; max_buf->time_start = data->preempt_timestamp; max_data->saved_latency = tr->max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; /* * If tsk == current, then use current_uid(), as that does not use * RCU. The irq tracer can be called out of RCU scope. */ if (tsk == current) max_data->uid = current_uid(); else max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * @cond_data: User data associated with a conditional snapshot * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. 
*/ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, void *cond_data) { if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); /* Inherit the recordable setting from trace_buffer */ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); #ifdef CONFIG_TRACER_SNAPSHOT if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) goto out_unlock; #endif swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); out_unlock: arch_spin_unlock(&tr->max_lock); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr - tracer * @tsk - task with the latency * @cpu - the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. */ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. 
*/ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, "Failed to swap buffers due to commit in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ static int wait_on_pipe(struct trace_iterator *iter, int full) { /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, full); } #ifdef CONFIG_FTRACE_STARTUP_TEST static bool selftests_can_run; struct trace_selftests { struct list_head list; struct tracer *type; }; static LIST_HEAD(postponed_selftests); static int save_selftest(struct tracer *type) { struct trace_selftests *selftest; selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); if (!selftest) return -ENOMEM; selftest->type = type; list_add(&selftest->list, &postponed_selftests); return 0; } static int run_tracer_selftest(struct tracer *type) { struct trace_array *tr = &global_trace; struct tracer *saved_tracer = tr->current_trace; int ret; if (!type->selftest || tracing_selftest_disabled) return 0; /* * If a tracer registers early in boot up (before scheduling is * initialized and such), then do not run its selftests yet. * Instead, run it a little later in the boot process. */ if (!selftests_can_run) return save_selftest(type); /* * Run a selftest on this tracer. * Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. 
*/ tracing_reset_online_cpus(&tr->trace_buffer); tr->current_trace = type; #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; } #endif /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ tr->current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); /* Add the warning after printing 'FAILED' */ WARN_ON(1); return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { tr->allocated_snapshot = false; /* Shrink the max buffer again */ if (ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } #endif printk(KERN_CONT "PASSED\n"); return 0; } static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; mutex_lock(&trace_types_lock); if (list_empty(&postponed_selftests)) goto out; pr_info("Running postponed tracer tests:\n"); list_for_each_entry_safe(p, n, &postponed_selftests, list) { ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } out: mutex_unlock(&trace_types_lock); return 0; } core_initcall(init_trace_selftests); #else static inline int run_tracer_selftest(struct tracer *type) { return 0; } #endif /* CONFIG_FTRACE_STARTUP_TEST */ static void add_tracer_options(struct trace_array *tr, struct tracer 
*t); static void __init apply_trace_boot_options(void); /** * register_tracer - register a tracer with the ftrace system. * @type - the plugin for the tracer * * Register a new plugin tracer. */ int __init register_tracer(struct tracer *type) { struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } mutex_lock(&trace_types_lock); tracing_selftest_running = true; for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } if (!type->set_flag) type->set_flag = &dummy_set_flag; if (!type->flags) { /*allocate a dummy tracer_flags*/ type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); if (!type->flags) { ret = -ENOMEM; goto out; } type->flags->val = 0; type->flags->opts = dummy_tracer_opt; } else if (!type->flags->opts) type->flags->opts = dummy_tracer_opt; /* store the tracer for __set_tracer_option */ type->flags->trace = type; ret = run_tracer_selftest(type); if (ret < 0) goto out; type->next = trace_types; trace_types = type; add_tracer_options(&global_trace, type); out: tracing_selftest_running = false; mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) goto out_unlock; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) goto out_unlock; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ tracing_set_tracer(&global_trace, type->name); default_bootup_tracer = NULL; apply_trace_boot_options(); /* disable other selftests, since this will break it. 
*/ tracing_selftest_disabled = true; #ifdef CONFIG_FTRACE_STARTUP_TEST printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", type->name); #endif out_unlock: return ret; } void tracing_reset(struct trace_buffer *buf, int cpu) { struct ring_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct trace_buffer *buf) { struct ring_buffer *buffer = buf->buffer; int cpu; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); for_each_online_cpu(cpu) ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus(void) { struct trace_array *tr; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->clear_trace) continue; tr->clear_trace = false; tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif } } static int *tgid_map; #define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; struct saved_cmdlines_buffer { unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; unsigned *map_cmdline_to_pid; unsigned cmdline_num; int cmdline_idx; char *saved_cmdlines; }; static struct saved_cmdlines_buffer *savedcmd; /* temporary disable recording */ static atomic_t trace_record_taskinfo_disabled __read_mostly; static inline char *get_saved_cmdlines(int idx) { return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; } static inline void set_cmdline(int idx, const char *cmdline) { strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); } static int allocate_cmdlines_buffer(unsigned int val, struct saved_cmdlines_buffer 
*s) { s->map_cmdline_to_pid = kmalloc_array(val, sizeof(*s->map_cmdline_to_pid), GFP_KERNEL); if (!s->map_cmdline_to_pid) return -ENOMEM; s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); if (!s->saved_cmdlines) { kfree(s->map_cmdline_to_pid); return -ENOMEM; } s->cmdline_idx = 0; s->cmdline_num = val; memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(s->map_pid_to_cmdline)); memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, val * sizeof(*s->map_cmdline_to_pid)); return 0; } static int trace_create_savedcmd(void) { int ret; savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); if (!savedcmd) return -ENOMEM; ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); if (ret < 0) { kfree(savedcmd); savedcmd = NULL; return -ENOMEM; } return 0; } int is_tracing_stopped(void) { return global_trace.stop_count; } /** * tracing_start - quick start of the tracer * * If tracing is enabled but was stopped by tracing_stop, * this will start the tracer back up. */ void tracing_start(void) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (--global_trace.stop_count) { if (global_trace.stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); global_trace.stop_count = 0; } goto out; } /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); buffer = global_trace.trace_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = global_trace.max_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #endif arch_spin_unlock(&global_trace.max_lock); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } static void tracing_start_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; /* If global, we need to also start the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_start(); 
raw_spin_lock_irqsave(&tr->start_lock, flags); if (--tr->stop_count) { if (tr->stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); tr->stop_count = 0; } goto out; } buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } /** * tracing_stop - quick stop of the tracer * * Light weight way to stop tracing. Use in conjunction with * tracing_start. */ void tracing_stop(void) { struct ring_buffer *buffer; unsigned long flags; raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (global_trace.stop_count++) goto out; /* Prevent the buffers from switching */ arch_spin_lock(&global_trace.max_lock); buffer = global_trace.trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = global_trace.max_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #endif arch_spin_unlock(&global_trace.max_lock); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } static void tracing_stop_tr(struct trace_array *tr) { struct ring_buffer *buffer; unsigned long flags; /* If global, we need to also stop the max tracer */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return tracing_stop(); raw_spin_lock_irqsave(&tr->start_lock, flags); if (tr->stop_count++) goto out; buffer = tr->trace_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } static int trace_save_cmdline(struct task_struct *tsk) { unsigned pid, idx; /* treat recording of idle task as a success */ if (!tsk->pid) return 1; if (unlikely(tsk->pid > PID_MAX_DEFAULT)) return 0; /* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. 
 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot for this pid yet: take the next slot round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

/*
 * Look up the saved comm for @pid and copy it into @comm.
 * Caller must hold trace_cmdline_lock (see trace_find_cmdline() below);
 * special pids get placeholder strings:
 *   0        -> "<idle>"
 *   negative -> "<XXX>" (and WARNs; should never happen)
 *   unmapped or > PID_MAX_DEFAULT -> "<...>"
 * @comm must be at least TASK_COMM_LEN bytes.
 */
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
	else
		strcpy(comm, "<...>");
}

/*
 * Locked wrapper around __trace_find_cmdline(). Preemption is disabled
 * around the arch spinlock so the holder cannot be scheduled out while
 * another CPU spins on trace_cmdline_lock.
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

/*
 * Return the recorded tgid for @pid, or 0 when the tgid map is not
 * allocated, @pid is 0, or @pid is out of the mapped range.
 */
int trace_find_tgid(int pid)
{
	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
		return 0;

	return tgid_map[pid];
}

/*
 * Record @tsk's pid -> tgid mapping. Returns 1 on success (recording of
 * the idle task counts as success), 0 when the map is missing or the pid
 * is out of range so the caller retries later.
 */
static int trace_save_tgid(struct task_struct *tsk)
{
	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
		return 0;

	tgid_map[tsk->pid] = tsk->tgid;
	return 1;
}

/*
 * Common early-out checks for taskinfo recording: nothing requested in
 * @flags, recording globally disabled / tracing off, or this CPU has no
 * pending taskinfo to save.
 */
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo -
record the task info of a task * * @task - task to record * @flags - TRACE_RECORD_CMDLINE for recording comm * - TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo(struct task_struct *task, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /** * tracing_record_taskinfo_sched_switch - record task info for sched_switch * * @prev - previous task during sched_switch * @next - next task during sched_switch * @flags - TRACE_RECORD_CMDLINE for recording comm * TRACE_RECORD_TGID for recording tgid */ void tracing_record_taskinfo_sched_switch(struct task_struct *prev, struct task_struct *next, int flags) { bool done; if (tracing_record_taskinfo_skip(flags)) return; /* * Record as much task information as possible. If some fail, continue * to try to record the others. */ done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); /* If recording any information failed, retry again soon. */ if (!done) return; __this_cpu_write(trace_taskinfo_save, false); } /* Helpers to record a specific task information */ void tracing_record_cmdline(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); } void tracing_record_tgid(struct task_struct *task) { tracing_record_taskinfo(task, TRACE_RECORD_TGID); } /* * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq * overflowed, and TRACE_TYPE_HANDLED otherwise. 
This helper function * simplifies those functions and keeps them in sync. */ enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } EXPORT_SYMBOL_GPL(trace_handle_return); void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) { struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | #else TRACE_FLAG_IRQS_NOSUPPORT | #endif ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); struct ring_buffer_event * trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { return __trace_buffer_lock_reserve(buffer, type, len, flags, pc); } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); DEFINE_PER_CPU(int, trace_buffered_event_cnt); static int trace_buffered_event_ref; /** * trace_buffered_event_enable - enable buffering events * * When events are being filtered, it is quicker to use a temporary * buffer to write the event data into if there's a likely chance * that it will not be committed. The discard of the ring buffer * is not as fast as committing, and is much slower than copying * a commit. * * When an event is to be filtered, allocate per cpu buffers to * write the event data into, and if the event is filtered and discarded * it is simply dropped, otherwise, the entire data is to be committed * in one shot. 
*/ void trace_buffered_event_enable(void) { struct ring_buffer_event *event; struct page *page; int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (trace_buffered_event_ref++) return; for_each_tracing_cpu(cpu) { page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) goto failed; event = page_address(page); memset(event, 0, sizeof(*event)); per_cpu(trace_buffered_event, cpu) = event; preempt_disable(); if (cpu == smp_processor_id() && this_cpu_read(trace_buffered_event) != per_cpu(trace_buffered_event, cpu)) WARN_ON_ONCE(1); preempt_enable(); } return; failed: trace_buffered_event_disable(); } static void enable_trace_buffered_event(void *data) { /* Probably not needed, but do it anyway */ smp_rmb(); this_cpu_dec(trace_buffered_event_cnt); } static void disable_trace_buffered_event(void *data) { this_cpu_inc(trace_buffered_event_cnt); } /** * trace_buffered_event_disable - disable buffering events * * When a filter is removed, it is faster to not use the buffered * events, and to commit directly into the ring buffer. Free up * the temp buffers when there are no more users. This requires * special synchronization with current events. */ void trace_buffered_event_disable(void) { int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (WARN_ON_ONCE(!trace_buffered_event_ref)) return; if (--trace_buffered_event_ref) return; preempt_disable(); /* For each CPU, set the buffer as used. */ smp_call_function_many(tracing_buffer_mask, disable_trace_buffered_event, NULL, 1); preempt_enable(); /* Wait for all current users to finish */ synchronize_rcu(); for_each_tracing_cpu(cpu) { free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); per_cpu(trace_buffered_event, cpu) = NULL; } /* * Make sure trace_buffered_event is NULL before clearing * trace_buffered_event_cnt. 
 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}

/* Fallback buffer used when tracing is off but triggers must see the event */
static struct ring_buffer *temp_buffer;

/*
 * Reserve space for a trace event on behalf of @trace_file.
 *
 * Fast path: when the event is soft-disabled or filtered (and the ring
 * buffer does not use absolute timestamps), write into this CPU's
 * preallocated trace_buffered_event page instead of the ring buffer, so
 * a discarded event costs almost nothing. trace_buffered_event_cnt acts
 * as a nesting guard: only the outermost (val == 1) user may take the
 * per-CPU page; nested contexts fall through to the ring buffer.
 *
 * On return *current_rb points at the buffer actually used, which the
 * caller must pass to the matching commit/discard.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
*/ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { *current_rb = temp_buffer; entry = __trace_buffer_lock_reserve(*current_rb, type, len, flags, pc); } return entry; } EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); static DEFINE_SPINLOCK(tracepoint_iter_lock); static DEFINE_MUTEX(tracepoint_printk_mutex); static void output_printk(struct trace_event_buffer *fbuffer) { struct trace_event_call *event_call; struct trace_event *event; unsigned long flags; struct trace_iterator *iter = tracepoint_print_iter; /* We should never get here if iter is NULL */ if (WARN_ON_ONCE(!iter)) return; event_call = fbuffer->trace_file->event_call; if (!event_call || !event_call->event.funcs || !event_call->event.funcs->trace) return; event = &fbuffer->trace_file->event_call->event; spin_lock_irqsave(&tracepoint_iter_lock, flags); trace_seq_init(&iter->seq); iter->ent = fbuffer->entry; event_call->event.funcs->trace(iter, 0, event); trace_seq_putc(&iter->seq, 0); printk("%s", iter->seq.buffer); spin_unlock_irqrestore(&tracepoint_iter_lock, flags); } int tracepoint_printk_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int save_tracepoint_printk; int ret; mutex_lock(&tracepoint_printk_mutex); save_tracepoint_printk = tracepoint_printk; ret = proc_dointvec(table, write, buffer, lenp, ppos); /* * This will force exiting early, as tracepoint_printk * is always zero when tracepoint_printk_iter is not allocated */ if (!tracepoint_print_iter) tracepoint_printk = 0; if (save_tracepoint_printk == tracepoint_printk) goto out; if (tracepoint_printk) static_key_enable(&tracepoint_printk_key.key); else static_key_disable(&tracepoint_printk_key.key); out: mutex_unlock(&tracepoint_printk_mutex); return ret; } void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) { if (static_key_false(&tracepoint_printk_key.key)) output_printk(fbuffer); event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, fbuffer->event, 
fbuffer->entry, fbuffer->flags, fbuffer->pc); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); /* * Skip 3: * * trace_buffer_unlock_commit_regs() * trace_event_buffer_commit() * trace_event_raw_event_xxx() */ # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); /* * If regs is not set, then skip the necessary functions. * Note, we can still get here via blktrace, wakeup tracer * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); ftrace_trace_userstack(buffer, flags, pc); } /* * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); } static void trace_process_export(struct trace_export *export, struct ring_buffer_event *event) { struct trace_entry *entry; unsigned int size = 0; entry = ring_buffer_event_data(event); size = ring_buffer_event_length(event); export->write(export, entry, size); } static DEFINE_MUTEX(ftrace_export_lock); static struct trace_export __rcu *ftrace_exports_list __read_mostly; static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); static inline void ftrace_exports_enable(void) { static_branch_enable(&ftrace_exports_enabled); } static inline void ftrace_exports_disable(void) { static_branch_disable(&ftrace_exports_enabled); } static void ftrace_exports(struct ring_buffer_event *event) { struct trace_export *export; preempt_disable_notrace(); export = rcu_dereference_raw_notrace(ftrace_exports_list); while (export) { trace_process_export(export, event); export = rcu_dereference_raw_notrace(export->next); } preempt_enable_notrace(); } static inline void add_trace_export(struct trace_export 
**list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

/*
 * Unlink @export from the singly linked RCU list at *list.
 * Returns 0 on success, -1 if @export was not found.
 * Caller must hold ftrace_export_lock; readers traverse under RCU, so
 * the unlink itself is published with rcu_assign_pointer().
 */
static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

/*
 * Add an export and flip the ftrace_exports_enabled static key on the
 * empty -> non-empty transition so the trace hot path only pays for
 * exports when at least one is registered.
 */
static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

/* Remove an export; disable the static key on non-empty -> empty. */
static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

/*
 * Register @export to receive copies of function trace events.
 * @export->write must be set; returns -1 otherwise, 0 on success.
 */
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

/* Unregister @export; returns 0 on success, -1 if it was not registered. */
int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

/*
 * Record a function-entry event (TRACE_FN) for @ip called from
 * @parent_ip into @tr's ring buffer.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if
(!call_filter_check_discard(call, entry, buffer, event)) { if (static_branch_unlikely(&ftrace_exports_enabled)) ftrace_exports(event); __buffer_unlock_commit(buffer, event); } } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) trace.skip++; #endif /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = __this_cpu_inc_return(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. 
*/ barrier(); if (use_stack == 1) { trace.entries = this_cpu_ptr(ftrace_stack.calls); trace.max_entries = FTRACE_STACK_MAX_ENTRIES; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); if (trace.nr_entries > size) size = trace.nr_entries; } else /* From now on, use_stack is a boolean */ use_stack = 0; size *= sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); memset(&entry->caller, 0, size); if (use_stack) memcpy(&entry->caller, trace.entries, trace.nr_entries * sizeof(unsigned long)); else { trace.max_entries = FTRACE_STACK_ENTRIES; trace.entries = entry->caller; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); } entry->size = trace.nr_entries; if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); preempt_enable_notrace(); } static inline void ftrace_trace_stack(struct trace_array *tr, struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(buffer, flags, skip, pc, regs); } void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { struct ring_buffer *buffer = tr->trace_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(buffer, flags, skip, pc, NULL); return; } /* * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), * but if the above rcu_is_watching() failed, then the NMI * triggered someplace critical, and rcu_irq_enter() should * not be called from NMI. 
*/ if (unlikely(in_nmi())) return; rcu_irq_enter_irqson(); __ftrace_trace_stack(buffer, flags, skip, pc, NULL); rcu_irq_exit_irqson(); } /** * trace_dump_stack - record a stack back trace in the trace buffer * @skip: Number of functions to skip (helper handlers) */ void trace_dump_stack(int skip) { unsigned long flags; if (tracing_disabled || tracing_selftest_running) return; local_save_flags(flags); #ifndef CONFIG_UNWINDER_ORC /* Skip 1 to skip this function. */ skip++; #endif __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, skip, preempt_count(), NULL); } EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; struct stack_trace trace; if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* * NMIs can not handle page faults, even with fix ups. * The save user stack can (and often does) fault. */ if (unlikely(in_nmi())) return; /* * prevent recursion, since the user stack tracing may * trigger other kernel events. 
*/ preempt_disable(); if (__this_cpu_read(user_stack_count)) goto out; __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), flags, pc); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = 0; trace.entries = entry->caller; save_stack_trace_user(&trace); if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); out: preempt_enable(); } #ifdef UNUSED static void __trace_userstack(struct trace_array *tr, unsigned long flags) { ftrace_trace_userstack(tr, flags, preempt_count()); } #endif /* UNUSED */ #endif /* CONFIG_STACKTRACE */ /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; char buffer[4][TRACE_BUF_SIZE]; }; static struct trace_buffer_struct *trace_percpu_buffer; /* * Thise allows for lockless recording. If we're nested too deeply, then * this returns NULL. 
*/ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); if (!buffer || buffer->nesting >= 4) return NULL; buffer->nesting++; /* Interrupts must see nesting incremented before we use the buffer */ barrier(); return &buffer->buffer[buffer->nesting][0]; } static void put_trace_buf(void) { /* Don't let the decrement of nesting leak before this */ barrier(); this_cpu_dec(trace_percpu_buffer->nesting); } static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct *buffers; buffers = alloc_percpu(struct trace_buffer_struct); if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; return 0; } static int buffers_allocated; void trace_printk_init_buffers(void) { if (buffers_allocated) return; if (alloc_percpu_trace_buffer()) return; /* trace_printk() is for debug use only. Don't use it in production. */ pr_warn("\n"); pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); pr_warn("** **\n"); pr_warn("** This means that this is a DEBUG kernel and it is **\n"); pr_warn("** unsafe for production use. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your vendor! **\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ tracing_update_buffers(); buffers_allocated = 1; /* * trace_printk_init_buffers() can be called by modules. * If that happens, then we need to start cmdline recording * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. 
*/ if (global_trace.trace_buffer.buffer) tracing_start_cmdline_record(); } void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct bprint_entry *entry; unsigned long flags; char *tbuffer; int len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out; local_save_flags(flags); size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; int 
len = 0, size, pc; struct print_entry *entry; unsigned long flags; char *tbuffer; if (tracing_disabled || tracing_selftest_running) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); local_save_flags(flags); size = sizeof(*entry) + len + 1; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); if (!call_filter_check_discard(call, entry, buffer, event)) { __buffer_unlock_commit(buffer, event); ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); } out: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } __printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_read(buf_iter, NULL); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, lost_events); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->trace_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. 
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		/* A single CPU was requested: peek only at that buffer. */
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;
		return ent;
	}

	/* All CPUs: choose the pending entry with the earliest timestamp. */
	for_each_tracing_cpu(cpu) {
		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	/* Report which CPU/timestamp the winning entry came from. */
	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	/* Return the iterator itself as the seq_file cursor, not the entry. */
	return iter->ent ? iter : NULL;
}

/* Destructively read (consume) the current entry from the ring buffer. */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

/*
 * seq_file ->next(): advance the trace iterator until its index reaches
 * the requested position *pos.
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	/* Walk forward until the iterator catches up with the request. */
	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

/*
 * Reset the per-cpu iterator and skip entries whose timestamp predates
 * the buffer's time_start, recording how many were skipped.
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	/* Remembered so the stats output can ignore the stale entries. */
	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_taskinfo_disabled);

	if (*pos != iter->pos) {
		/* Position changed: restart the walk from the beginning. */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	/* Both locks are released in s_stop(). */
	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

/* seq_file ->stop(): undo the state taken in s_start(). */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_taskinfo_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

/* Sum entry counts (and overruns) across all per-cpu ring buffers. */
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

/* Print the column legend used by the latency trace format. */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n"
		    "# / _-----=> irqs-off \n"
		    "# | / _----=> need-resched \n"
		    "# || / _---=> hardirq/softirq \n"
		    "# ||| / _--=> preempt-depth \n"
		    "# |||| / delay \n"
		    "# cmd pid ||||| time | caller \n"
		    "# \\ / ||||| \\ | / \n");
}

/* Print the entries-written summary line shared by the header variants. */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

/* Column legend for the default output (TGID column is optional). */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n",
		   tgid ? "TGID " : "");
	seq_printf(m, "# | | %s | | |\n",
		   tgid ? " | " : "");
}

/* Column legend for output with the irq-info columns enabled. */
static void print_func_help_header_irq(struct trace_buffer *buf,
				       struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char tgid_space[] = " ";
	const char space[] = " ";

	print_event_info(buf, m);

	seq_printf(m, "# %s _-----=> irqs-off\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "# %s / _----=> need-resched\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "# %s|| / _--=> preempt-depth\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "# %s||| / delay\n",
		   tgid ? tgid_space : space);
	seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
		   tgid ? " TGID " : space);
	seq_printf(m, "# | | %s | |||| | |\n",
		   tgid ? " | " : space);
}

/* Print the verbose latency-trace banner (task, latency, critical section). */
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "# -----------------\n");
	seq_printf(m, "# | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "# -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "# => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n# => ended at: ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

/*
 * Emit a "buffer started" annotation the first time entries from a CPU
 * show up mid-trace (only when the annotate option is enabled).
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	/* Already announced this CPU? */
	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

/* Default (human readable) rendering of the current entry. */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

/* Raw numeric rendering of the current entry. */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

/* Hex-dump rendering of the current entry. */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

/* Binary rendering of the current entry. */
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

/*
 * Return 1 if the buffer(s) this iterator covers hold no entries.
 * Uses the per-cpu seq iterators when they exist, otherwise asks
 * the ring buffer directly.
 */
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held.
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	/* Announce events lost since the last read, if any. */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Give the current tracer first chance at formatting the line. */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	/* Otherwise dispatch on the selected output format option. */
	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

/* Print the latency-format header, unless the buffers are empty. */
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

/* Print the appropriate header for the current output options. */
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->trace_buffer, m,
						       trace_flags);
		}
	}
}

/* Warn in the output if the function tracer has been disabled by a bug. */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "# MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the top-level snapshot file. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/* Usage text for the per-cpu snapshot files. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/* Print snapshot state plus the applicable usage help. */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

/* seq_file ->show(): print either the header, leftover data, or one line. */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

/*
 * Common open path for the trace and snapshot files: allocate the
 * iterator, copy the current tracer and prepare per-cpu read iterators.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

/* Simple open: stash i_private for read-only files without refcounting. */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true: false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

/*
 * Release path paired with __tracing_open(): tear down the per-cpu read
 * iterators, restart tracing if we had stopped it, and drop the tr ref.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

/* Release for files opened with tracing_open_generic_tr(). */
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

/* Release for single_open() style files that hold a tr reference. */
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

/* Open for the "trace" file: O_TRUNC erases the buffer contents. */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

/* seq_file ->next() for the available_tracers listing. */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

/* seq_file ->start(): takes trace_types_lock, released in t_stop(). */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
			;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

/* Print one tracer name, space separated, newline after the last. */
static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

/* Open for the available_tracers file. */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

/* Writes to the "trace" file are accepted but discarded. */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

/* Seek only when the file is open for read; writers stay at offset 0. */
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/* Read handler for tracing_cpumask: format the mask into a temp buffer. */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	/* First snprintf(NULL, 0, ...) sizes the buffer exactly. */
	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

/*
 * Write handler for tracing_cpumask: parse the new mask and
 * enable/disable per-cpu recording for any bit that flipped.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

/* List all global options and the current tracer's own options. */
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

/* Apply one tracer-specific option bit after the tracer approves it. */
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

/*
 * Flip a global trace option bit and propagate the side effects
 * (cmd/tgid recording, fork following, overwrite mode, printk).
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		/* The pid->tgid map is allocated lazily on first enable. */
		if (!tgid_map)
			tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
					   sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

/* Parse one "[no]option" token and apply it as a global or tracer option. */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	/* A leading "no" inverts the option. */
	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could
be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the 
buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t-change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t-view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by adding a suffix 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t 
stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. 
Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t -:[<group>/]<event>\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" #else "\t $stack<index>, $stack, $retval, $comm\n" #endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>,\n" "\t <type>\\[<array-size>\\]\n" #ifdef 
CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo 
'!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. 
The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. The syntax is analogous to\n" "\t the enable_event and disable_event triggers.\n\n" "\t Hist trigger handlers and actions are executed whenever a\n" "\t a histogram entry is added or updated. They take the form:\n\n" "\t <handler>.<action>\n\n" "\t The available handlers are:\n\n" "\t onmatch(matching.event) - invoke on addition or update\n" "\t onmax(var) - invoke if var exceeds current max\n" "\t onchange(var) - invoke action if var changes\n\n" "\t The available actions are:\n\n" "\t trace(<synthetic_event>,param list) - generate synthetic event\n" "\t save(field,...) 
- save current event fields\n" #ifdef CONFIG_TRACER_SNAPSHOT "\t snapshot() - snapshot the trace buffer\n" #endif #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) { int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { if (trace_find_tgid(*ptr)) return ptr; } return NULL; } static void *saved_tgids_start(struct seq_file *m, loff_t *pos) { void *v; loff_t l = 0; if (!tgid_map) return NULL; v = &tgid_map[0]; while (l <= *pos) { v = saved_tgids_next(m, v, &l); if (!v) return NULL; } return v; } static void saved_tgids_stop(struct seq_file *m, void *v) { } static int saved_tgids_show(struct seq_file *m, void *v) { int pid = (int *)v - tgid_map; seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); return 0; } static const struct seq_operations tracing_saved_tgids_seq_ops = { .start = saved_tgids_start, .stop = saved_tgids_stop, .next = saved_tgids_next, .show = saved_tgids_show, }; static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; return seq_open(filp, &tracing_saved_tgids_seq_ops); } static const struct file_operations tracing_saved_tgids_fops = { .open = tracing_saved_tgids_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) { unsigned int *ptr = v; if (*pos || m->count) ptr++; (*pos)++; for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; ptr++) { if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) continue; return ptr; } return NULL; } static void *saved_cmdlines_start(struct seq_file *m, 
loff_t *pos)
{
	void *v;
	loff_t l = 0;

	/*
	 * Lock taken here is dropped in saved_cmdlines_stop().
	 * NOTE(review): this relies on the seq_file core calling ->stop()
	 * even when ->start() returns NULL mid-iteration — confirm.
	 */
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

/* Release the cmdline lock taken in saved_cmdlines_start(). */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

/* Print "<pid> <comm>" for one recorded cmdline entry. */
static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Report the current capacity of the saved_cmdlines buffer. */
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* Free a saved_cmdlines_buffer and its two backing arrays. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

/*
 * Replace the global savedcmd buffer with a freshly allocated one of
 * capacity @val, then free the old buffer.
 * NOTE(review): the old buffer is freed right after the pointer swap;
 * whether a concurrent reader can still hold a reference to it outside
 * trace_cmdline_lock is not visible from this chunk — verify.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/*
 * Normalize @ptr for iteration: if it landed on a tail marker
 * (eval_string == NULL), hop to the next list's first real item,
 * or return NULL at the end of the chain.
 */
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

/* seq_file ->next for "eval_map": step to the next map entry. */
static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

/* seq_file ->start: take the eval mutex and seek to *pos. */
static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;	/* skip the head item of the first array */

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

/* Print "<string> <value> (<system>)" for one eval map entry. */
static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

/*
 * Append @len eval maps from @mod onto the trace_eval_maps chain,
 * wrapped with a head item (module + length) and a tail item (link to
 * the next array).
 */
static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		/* walk to the last tail and chain the new array there */
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;

		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	/* zeroed tail item terminates the chain */
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

/* Register @len eval maps: update events, then expose them in the file. */
static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

/* Read handler for "current_tracer": report the active tracer's name. */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* Reset per-cpu buffers and run the tracer's init hook. */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

/* Record @val as the entry count on every per-cpu buffer of @buf. */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the
size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* copy each cpu's recorded size across; stop on first error */
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/*
 * Resize @tr's main buffer (and its max/snapshot buffer, when in use)
 * to @size for @cpu (or all cpus). On a failed max-buffer resize the
 * main buffer is rolled back to its previously recorded per-cpu sizes.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* roll the main buffer back to its own recorded sizes */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

/* Locked wrapper: validate @cpu_id against the tracing mask and resize. */
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;
	
	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

/* Create the tracefs option files for tracer @t under @tr's directory. */
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

/*
 * Switch @tr's current tracer to the one named @buf: tear down the old
 * tracer, swap in nop_trace while resizing/freeing the snapshot buffer,
 * then initialize and enable the new one. Returns 0 or -errno.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/* Write handler for "current_tracer": parse the name and switch tracers. */
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

/* Format *ptr (nanoseconds, or -1 for "unset") as microseconds. */
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ?
-1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* Parse a decimal microsecond value from userspace and store nanoseconds. */
static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

/* Update tracing_thresh and let the current tracer react to the change. */
static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

/*
 * Open handler for "trace_pipe": allocate and set up a trace_iterator,
 * pin the trace_array and bump current_trace->ref so the tracer cannot
 * be switched while the pipe is open.
 */
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

/* Release handler for "trace_pipe": undo everything tracing_open_pipe did. */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

/* Common poll implementation for the pipe and per-cpu pipe files. */
static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held.
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

/* splice_pipe_desc release hook: free one page allocated for splice. */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

/*
 * Fill iter->seq with as many formatted trace lines as fit in one page
 * (and in @rem remaining bytes); returns the updated remainder.
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter))	{
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

/* splice handler for "trace_pipe": move trace lines into pipe pages. */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below.
 */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

/*
 * Read handler for "buffer_size_kb": report the per-cpu buffer size in KB
 * ("X" when per-cpu sizes differ, "(expanded: N)" before first expansion).
 */
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

/* Write handler for "buffer_size_kb": resize the buffer (value in KB). */
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

/* Read handler for "buffer_total_size_kb": sum of all per-cpu sizes. */
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

/*
 * Write handler for "trace_marker": inject a user string into the ring
 * buffer as a TRACE_PRINT event.
 * NOTE(review): a zero-length write reaches "entry->buf[cnt - 1]" below,
 * i.e. reads buf[-1] out of bounds — confirm whether a cnt == 0 write can
 * reach this handler and whether an explicit check is needed.
 */
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (tt)
event_triggers_post_call(tr->trace_marker_file, tt); if (written > 0) *fpos += written; return written; } /* Limit it for now to 3K (including tag) */ #define RAW_DATA_MAX_SIZE (1024*3) static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct ring_buffer *buffer; struct raw_data_entry *entry; const char faulted[] = "<faulted>"; unsigned long irq_flags; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); local_save_flags(irq_flags); size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->trace_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, irq_flags, preempt_count()); if (!event) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, faulted, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); if (written > 0) *fpos += written; return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? 
"]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; mutex_lock(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. */ tracing_reset_online_cpus(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; mutex_lock(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); mutex_unlock(&trace_types_lock); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = 
inode->i_private; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr)) return -ENODEV; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) { int ret = 0; mutex_lock(&trace_types_lock); if (abs && tr->time_stamp_abs_ref++) goto out; if (!abs) { if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { ret = -EINVAL; goto out; } if (--tr->time_stamp_abs_ref) goto out; } ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); #endif out: mutex_unlock(&trace_types_lock); return ret; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret = 0; if (trace_array_get(tr) < 0) return -ENODEV; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->trace_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, 
cnt, 10, &val); if (ret) return ret; mutex_lock(&trace_types_lock); if (tr->current_trace->use_max_tr) { ret = -EBUSY; goto out; } arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); if (ret) goto out; switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { ret = -EINVAL; break; } #endif if (!tr->allocated_snapshot) { ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id(), NULL); else update_max_tr_single(tr, current, iter->cpu_file); local_irq_enable(); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } out: mutex_unlock(&trace_types_lock); return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int 
ret; ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.trace_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; 
static const struct file_operations tracing_mark_raw_fops = { .open = tracing_open_generic_tr, .write = tracing_mark_raw_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; #endif /* CONFIG_TRACER_SNAPSHOT */ static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; if (tracing_disabled) return -ENODEV; if (trace_array_get(tr) < 0) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.trace_buffer = &tr->trace_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->current_trace->ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct 
trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; } } if (!info->spare) return ret; /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->trace_buffer->buffer, &info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = PAGE_SIZE - info->read; if (size > count) size = count; ret = copy_to_user(ubuf, info->spare + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; mutex_lock(&trace_types_lock); iter->tr->current_trace->ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare_cpu, info->spare); kfree(info); mutex_unlock(&trace_types_lock); return 0; } struct buffer_ref { struct ring_buffer *buffer; void *page; int cpu; int ref; }; static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = 
(struct buffer_ref *)buf->private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); buf->private = 0; } static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (ref->ref > INT_MAX/2) return false; ref->ref++; return true; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. */ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif if (*ppos & (PAGE_SIZE - 1)) return -EINVAL; if (len & (PAGE_SIZE - 1)) { if (len < PAGE_SIZE) return -EINVAL; len &= PAGE_MASK; } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, 
iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, &ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ref->page); spd.pages[i] = page; spd.partial[i].len = PAGE_SIZE; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += PAGE_SIZE; entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? */ if (!spd.nr_pages) { if (ret) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->tr->buffer_percent); if (ret) goto out; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct trace_buffer *trace_buf = &tr->trace_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = 
ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer, cpu)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *p = filp->private_data; char buf[64]; /* Not too big for a shallow stack */ int r; r = scnprintf(buf, 63, "%ld", *p); buf[r++] = '\n'; return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static const struct file_operations tracing_dyn_info_fops = { .open = 
tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = 
ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') return unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; ret = register_ftrace_function_probe(glob, tr, ops, count); out: return ret < 0 ? ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); WARN_ONCE(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * 
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", 0444, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", 0644, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", 0444, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_cpu_file("snapshot", 0644, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", 0444, d_cpu, tr, cpu, &snapshot_raw_fops); #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 
10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) return ret; } *ppos += cnt; return cnt; } static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, }; /* * In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. 
*/ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return tr->options; } static void 
create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, 0644, t_options, topt, &trace_options_fops); } static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; int i; if (!tracer) return; flags = tracer->flags; if (!flags || !flags->opts) return; /* * If this is an instance, only create flags for tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return; for (i = 0; i < tr->nr_topts; i++) { /* Make sure there's no duplicate flags. */ if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) return; } opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); WARN_ONCE(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, 0644, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; 
t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) create_trace_option_core_file(tr, trace_options[i], i); } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->trace_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { mutex_lock(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); } mutex_unlock(&trace_types_lock); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; if (!val) val = 1; tr->buffer_percent = val; (*ppos)++; return cnt; } 
static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; struct dentry *trace_instance_dir; static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) { enum ring_buffer_flags rb_flags; rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; buf->tr = tr; buf->buffer = ring_buffer_alloc(size, rb_flags); if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->trace_buffer, ring_buffer_size(tr->trace_buffer.buffer, 0)); return 0; } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; /* * Only the top level trace array gets its snapshot allocated * from the kernel command line. 
*/ allocate_snapshot = false; #endif return 0; } static void free_trace_buffer(struct trace_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->trace_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static void __update_tracer_options(struct trace_array *tr) { struct tracer *t; for (t = trace_types; t; t = t->next) add_tracer_options(tr, t); } static void update_tracer_options(struct trace_array *tr) { mutex_lock(&trace_types_lock); __update_tracer_options(tr); mutex_unlock(&trace_types_lock); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -EEXIST; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) goto out_unlock; } ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) goto out_unlock; tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; tr->current_trace = &nop_trace; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; tr->dir = tracefs_create_dir(name, trace_instance_dir); if (!tr->dir) goto out_free_tr; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove_recursive(tr->dir); goto out_free_tr; } 
ftrace_init_trace_array(tr); init_tracer_tracefs(tr, tr->dir); init_trace_flags_index(tr); __update_tracer_options(tr); list_add(&tr->list, &ftrace_trace_arrays); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return 0; out_free_tr: free_trace_buffers(tr); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static int instance_rmdir(const char *name) { struct trace_array *tr; int found = 0; int ret; int i; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = -ENODEV; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { found = 1; break; } } if (!found) goto out_unlock; ret = -EBUSY; if (tr->ref || (tr->current_trace && tr->current_trace->ref)) goto out_unlock; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1 << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1 << i, 0); } tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove_recursive(tr->dir); free_trace_buffers(tr); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); ret = 0; out_unlock: mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); return ret; } static __init void create_trace_instances(struct dentry *d_tracer) { trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (WARN_ON(!trace_instance_dir)) return; } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { struct trace_event_file *file; int cpu; trace_create_file("available_tracers", 0444, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", 0644, d_tracer, tr, &set_tracer_fops); 
trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", 0644, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", 0644, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", 0444, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) trace_create_file("trigger", 0644, file->dir, file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", 0644, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", 0444, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", 0444, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) trace_create_file("tracing_max_latency", 0644, d_tracer, &tr->max_latency, &tracing_max_lat_fops); #endif if (ftrace_create_function_files(tr, d_tracer)) WARN(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); #endif for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the 
debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; mnt = vfs_submount(mntpt, type, "tracefs", NULL); put_filesystem(type); if (IS_ERR(mnt)) return NULL; mntget(mnt); return mnt; } /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ struct dentry *tracing_init_dentry(void) { struct trace_array *tr = &global_trace; /* The top level trace array uses NULL as parent */ if (tr->dir) return NULL; if (WARN_ON(!tracefs_initialized()) || (IS_ENABLED(CONFIG_DEBUG_FS) && WARN_ON(!debugfs_initialized()))) return ERR_PTR(-ENODEV); /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kerenl. */ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); if (!tr->dir) { pr_warn_once("Could not create debugfs directory 'tracing'\n"); return ERR_PTR(-ENOMEM); } return NULL; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static void __init trace_eval_init(void) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); } #ifdef CONFIG_MODULES static void trace_module_add_evals(struct module *mod) { if (!mod->num_trace_evals) return; /* * Modules with bad taint do not have events created, do * not bother with enums either. 
*/ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; mutex_lock(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) goto out; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); out: mutex_unlock(&trace_eval_mutex); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return 0; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init int tracer_init_tracefs(void) { struct dentry *d_tracer; trace_access_lock_init(); d_tracer = tracing_init_dentry(); if (IS_ERR(d_tracer)) return 0; event_trace_init(); init_tracer_tracefs(&global_trace, d_tracer); ftrace_init_tracefs_toplevel(&global_trace, d_tracer); trace_create_file("tracing_thresh", 0644, d_tracer, &global_trace, &tracing_thresh_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", 0644, d_tracer, NULL, &tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", 0444, d_tracer, NULL, &tracing_saved_tgids_fops); trace_eval_init(); trace_create_eval_file(d_tracer); #ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); 
#endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif create_trace_instances(d_tracer); update_tracer_options(&global_trace); return 0; } static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); return NOTIFY_OK; } static struct notifier_block trace_panic_notifier = { .notifier_call = trace_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); break; default: break; } return NOTIFY_OK; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. 
*/ s->buffer[s->seq.len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } void trace_init_global_iter(struct trace_iterator *iter) { iter->tr = &global_trace; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; iter->trace_buffer = &global_trace.trace_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->trace_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; static atomic_t dump_running; struct trace_array *tr = &global_trace; unsigned int old_userobj; unsigned long flags; int cnt = 0, cpu; /* Only allow one dump user at a time. */ if (atomic_inc_return(&dump_running) != 1) { atomic_dec(&dump_running); return; } /* * Always turn off tracing when we dump. * We don't need to show trace output of what happens * between multiple crashes. * * If the user does a sysrq-z, then they can re-enable * tracing with echo 1 > tracing_on. 
*/ tracing_off(); local_irq_save(flags); printk_nmi_direct_enter(); /* Simulate the iterator */ trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = RING_BUFFER_ALL_CPUS; break; case DUMP_ORIG: iter.cpu_file = raw_smp_processor_id(); break; case DUMP_NONE: goto out_enable; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); iter.cpu_file = RING_BUFFER_ALL_CPUS; } printk(KERN_TRACE "Dumping ftrace buffer:\n"); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read the * the next buffer. This is a bit expensive, but is * not done often. We fill all what we can read, * and then release the locks again. 
*/ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); out_enable: tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); } atomic_dec(&dump_running); printk_nmi_direct_exit(); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ftrace_dump); int trace_run_command(const char *buf, int (*createfn)(int, char **)) { char **argv; int argc, ret; argc = 0; ret = 0; argv = argv_split(GFP_KERNEL, buf, &argc); if (!argv) return -ENOMEM; if (argc) ret = createfn(argc, argv); argv_free(argv); return ret; } #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(int, char **)) { char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); ret = -EINVAL; goto out; } } done += size; /* Remove comments */ tmp = 
strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = trace_run_command(buf, createfn); if (ret) goto out; buf += size; } while (done < count); } ret = done; out: kfree(kbuf); return ret; } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; /* * Make sure we don't accidently add more trace options * than we have bits for. */ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocates some memory for the ring buffer. We * don't free the buffer if the if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. 
*/ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:preapre", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); WARN_ON(1); goto out_free_savedcmd; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. */ global_trace.current_trace = &nop_trace; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; ftrace_init_global_array_ops(&global_trace); init_trace_flags_index(&global_trace); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); list_add(&global_trace.list, &ftrace_trace_arrays); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_savedcmd: free_saved_cmdlines_buffer(savedcmd); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: 
free_cpumask_var(tracing_buffer_mask); out: return ret; } void __init early_trace_init(void) { if (tracepoint_printk) { tracepoint_print_iter = kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); if (WARN_ON(!tracepoint_print_iter)) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); } void __init trace_init(void) { trace_event_init(); } __init static int clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit. If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. */ if (!default_bootup_tracer) return 0; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; return 0; } fs_initcall(tracer_init_tracefs); late_initcall_sync(clear_boot_tracer); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK __init static int tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" " \"trace_clock=local\"\n" "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } return 0; } late_initcall_sync(tracing_set_default_clock); #endif
./CrossVul/dataset_final_sorted/CWE-416/c/good_820_5
crossvul-cpp_data_good_2906_0
/*
 * USB Serial Console driver
 *
 * Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * Thanks to Randy Dunlap for the original version of this code.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/*
 * State for the (single) USB serial console: the usb-serial port currently
 * bound to the console, if any.  There is exactly one static instance below.
 */
struct usbcons_info {
	int magic;
	int break_flag;
	struct usb_serial_port *port;	/* console port, NULL when unbound */
};

static struct usbcons_info usbcons_info;
static struct console usbcons;

/*
 * ------------------------------------------------------------
 * USB Serial console driver
 *
 * Much of the code here is copied from drivers/char/serial.c
 * and implements a phony serial console in the same way that
 * serial.c does so that in case some software queries it,
 * it will get the same results.
 *
 * Things that are different from the way the serial port code
 * does things, is that we call the lower level usb-serial
 * driver code to initialize the device, and we set the initial
 * console speeds based on the command line arguments.
 * ------------------------------------------------------------
 */

/*
 * Empty ops table for the throw-away tty allocated in usb_console_setup()
 * purely so the driver's set_termios() can be exercised; the tty is never
 * exposed to userspace, so no real operations are needed.
 */
static const struct tty_operations usb_console_fake_tty_ops = {
};

/*
 * The parsing of the command line works exactly like the
 * serial.c code, except that the specifier is "ttyUSB" instead
 * of "ttyS".
 */

/*
 * usb_console_setup - console ->setup() callback for the "ttyUSB" console
 * @co:      console being configured; co->index selects the usb-serial minor
 * @options: option string from console=ttyUSBn,<options>, parsed like
 *           serial.c as <baud><parity><bits>[r], e.g. "115200n8" (may be NULL)
 *
 * Looks up the usb-serial port for co->index, records it in usbcons_info,
 * and — if the port has not been initialized yet — opens it via the type
 * driver.  When the driver supplies set_termios(), a temporary fake tty is
 * built just to carry the termios derived from @options into that call,
 * then torn down again.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): serial->disc_mutex is unlocked on every exit path but never
 * visibly locked in this function — presumably usb_serial_port_get_by_minor()
 * returns with it held; confirm against the usb-serial core.
 */
static int usb_console_setup(struct console *co, char *options)
{
	struct usbcons_info *info = &usbcons_info;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int doflow = 0;
	int cflag = CREAD | HUPCL | CLOCAL;
	char *s;
	struct usb_serial *serial;
	struct usb_serial_port *port;
	int retval;
	struct tty_struct *tty = NULL;
	struct ktermios dummy;

	if (options) {
		/* "<digits><parity-char><bits-char>[r]" — r enables flow ctl */
		baud = simple_strtoul(options, NULL, 10);
		s = options;
		while (*s >= '0' && *s <= '9')
			s++;
		if (*s)
			parity = *s++;
		if (*s)
			bits = *s++ - '0';
		if (*s)
			doflow = (*s++ == 'r');
	}

	/* Sane default */
	if (baud == 0)
		baud = 9600;

	switch (bits) {
	case 7:
		cflag |= CS7;
		break;
	default:
	case 8:
		cflag |= CS8;
		break;
	}

	switch (parity) {
	case 'o': case 'O':
		cflag |= PARODD;
		break;
	case 'e': case 'E':
		cflag |= PARENB;
		break;
	}

	co->cflag = cflag;

	/*
	 * no need to check the index here: if the index is wrong, console
	 * code won't call us
	 */
	port = usb_serial_port_get_by_minor(co->index);
	if (port == NULL) {
		/* no device is connected yet, sorry :( */
		pr_err("No USB device connected to ttyUSB%i\n", co->index);
		return -ENODEV;
	}

	serial = port->serial;

	retval = usb_autopm_get_interface(serial->interface);
	if (retval)
		goto error_get_interface;

	tty_port_tty_set(&port->port, NULL);

	/* bind this port as the console port */
	info->port = port;

	/* temporarily take an open count while we run the fake-tty dance */
	++port->port.count;
	if (!tty_port_initialized(&port->port)) {
		if (serial->type->set_termios) {
			/*
			 * allocate a fake tty so the driver can initialize
			 * the termios structure, then later call set_termios to
			 * configure according to command line arguments
			 */
			tty = kzalloc(sizeof(*tty), GFP_KERNEL);
			if (!tty) {
				retval = -ENOMEM;
				goto reset_open_count;
			}
			kref_init(&tty->kref);
			tty->driver = usb_serial_tty_driver;
			tty->index = co->index;
			init_ldsem(&tty->ldisc_sem);
			spin_lock_init(&tty->files_lock);
			INIT_LIST_HEAD(&tty->tty_files);
			kref_get(&tty->driver->kref);
			__module_get(tty->driver->owner);
			tty->ops = &usb_console_fake_tty_ops;
			tty_init_termios(tty);
			tty_port_tty_set(&port->port, tty);
		}

		/* only call the device specific open if this
		 * is the first time the port is opened */
		retval = serial->type->open(NULL, port);
		if (retval) {
			dev_err(&port->dev, "could not open USB console port\n");
			goto fail;
		}

		if (serial->type->set_termios) {
			/* push the command-line termios into the driver */
			tty->termios.c_cflag = cflag;
			tty_termios_encode_baud_rate(&tty->termios, baud, baud);
			memset(&dummy, 0, sizeof(struct ktermios));
			serial->type->set_termios(tty, port, &dummy);

			/* fake tty has served its purpose — drop it */
			tty_port_tty_set(&port->port, NULL);
			tty_kref_put(tty);
		}
		tty_port_set_initialized(&port->port, 1);
	}
	/* Now that any required fake tty operations are completed restore
	 * the tty port count */
	--port->port.count;
	/* The console is special in terms of closing the device so
	 * indicate this port is now acting as a system console. */
	port->port.console = 1;

	mutex_unlock(&serial->disc_mutex);

	return retval;

 fail:
	tty_port_tty_set(&port->port, NULL);
	tty_kref_put(tty);
 reset_open_count:
	port->port.count = 0;
	usb_autopm_put_interface(serial->interface);
 error_get_interface:
	usb_serial_put(serial);
	mutex_unlock(&serial->disc_mutex);
	return retval;
}

/*
 * usb_console_write - console ->write() callback
 * @co:    console (unused; state lives in usbcons_info)
 * @buf:   bytes to emit
 * @count: number of bytes in @buf
 *
 * Pushes @count bytes through the port driver's type->write(); after each
 * LF (0x0a) a CR (0x0d) is written so output stays readable on terminals.
 * Silently returns when no console port is bound, the device has gone away
 * (USB_STATE_NOTATTACHED), the count is zero, or the port is not flagged
 * as a console.
 */
static void usb_console_write(struct console *co,
					const char *buf, unsigned count)
{
	static struct usbcons_info *info = &usbcons_info;
	struct usb_serial_port *port = info->port;
	struct usb_serial *serial;
	int retval = -ENODEV;

	if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
		return;
	serial = port->serial;

	if (count == 0)
		return;

	dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count);

	if (!port->port.console) {
		dev_dbg(&port->dev, "%s - port not opened\n", __func__);
		return;
	}

	while (count) {
		unsigned int i;
		unsigned int lf;

		/* search for LF so we can insert CR if necessary */
		for (i = 0, lf = 0 ; i < count ; i++) {
			if (*(buf + i) == 10) {
				lf = 1;
				i++;
				break;
			}
		}
		/* pass on to the driver specific version of this function if
		   it is available */
		retval = serial->type->write(NULL, port, buf, i);
		dev_dbg(&port->dev, "%s - write: %d\n", __func__, retval);
		if (lf) {
			/* append CR after LF */
			unsigned char cr = 13;
			retval = serial->type->write(NULL, port, &cr, 1);
			dev_dbg(&port->dev, "%s - write cr: %d\n",
							__func__, retval);
		}
		buf += i;
		count -= i;
	}
}

/*
 * usb_console_device - console ->device() callback
 *
 * Returns the tty driver backing this console (usb_serial_tty_driver via
 * usbcons.data) and reports the console index, or NULL if the driver is
 * not registered yet.
 */
static struct tty_driver *usb_console_device(struct console *co, int *index)
{
	struct tty_driver **p = (struct tty_driver **)co->data;

	if (!*p)
		return NULL;

	*index = co->index;
	return *p;
}

/* The "ttyUSB" console definition registered with the console core. */
static struct console usbcons = {
	.name =		"ttyUSB",
	.write =	usb_console_write,
	.device =	usb_console_device,
	.setup =	usb_console_setup,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
	.data = 	&usb_serial_tty_driver,
};

/*
 * usb_serial_console_disconnect - a usb-serial device is going away
 * @serial: the device being disconnected
 *
 * If port 0 of the departing device is the active console port, tear the
 * console down and drop the device reference that was kept while the
 * console was bound (setup's error paths put it; the success path did not).
 */
void usb_serial_console_disconnect(struct usb_serial *serial)
{
	if (serial->port[0] && serial->port[0] == usbcons_info.port) {
		usb_serial_console_exit();
		usb_serial_put(serial);
	}
}

/*
 * usb_serial_console_init - register the USB serial console
 * @minor: minor number of the port being added; only minor 0 registers
 */
void usb_serial_console_init(int minor)
{
	if (minor == 0) {
		/*
		 * Call register_console() if this is the first device plugged
		 * in.  If we call it earlier, then the callback to
		 * console_setup() will fail, as there is not a device seen by
		 * the USB subsystem yet.
		 */
		/*
		 * Register console.
		 * NOTES:
		 * console_setup() is called (back) immediately (from
		 * register_console). console_write() is called immediately
		 * from register_console iff CON_PRINTBUFFER is set in flags.
		 */
		pr_debug("registering the USB serial console.\n");
		register_console(&usbcons);
	}
}

/*
 * usb_serial_console_exit - unregister the console and unbind its port
 *
 * Clears usbcons_info.port so subsequent usb_console_write() calls become
 * no-ops, and drops the port's console flag.
 */
void usb_serial_console_exit(void)
{
	if (usbcons_info.port) {
		unregister_console(&usbcons);
		usbcons_info.port->port.console = 0;
		usbcons_info.port = NULL;
	}
}
./CrossVul/dataset_final_sorted/CWE-416/c/good_2906_0
crossvul-cpp_data_bad_2843_0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

/* Allocate a zeroed net_generic array sized for max_gen_ptrs slots. */
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

/* Store @data in slot @id of net->gen, growing the array (RCU-swapped)
 * if the current one is too short.  Caller must hold net_mutex. */
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

/* Run one pernet_operations' per-netns setup: allocate/assign its generic
 * data slot (if it declares one) and call its ->init hook. */
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

/* Free the generic data slot allocated for @ops in @net, if any. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

/* Invoke ->exit for every netns on the list, then the batched ->exit_batch. */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops' generic data for every netns on the exit list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * returns the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	/* Refuse to allocate an id under a netns already being torn down. */
	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/* Only allocate a fresh id when the peer is still alive. */
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true is the peer netns has an id assigned into the
 * current netns.
*/ bool peernet_has_id(struct net *net, struct net *peer) { return peernet2id(net, peer) >= 0; } struct net *get_net_ns_by_id(struct net *net, int id) { struct net *peer; if (id < 0) return NULL; rcu_read_lock(); spin_lock_bh(&net->nsid_lock); peer = idr_find(&net->netns_ids, id); if (peer) get_net(peer); spin_unlock_bh(&net->nsid_lock); rcu_read_unlock(); return peer; } /* * setup_net runs the initializers for the network namespace object. */ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) { /* Must be called with net_mutex held */ const struct pernet_operations *ops, *saved_ops; int error = 0; LIST_HEAD(net_exit_list); atomic_set(&net->count, 1); refcount_set(&net->passive, 1); net->dev_base_seq = 1; net->user_ns = user_ns; idr_init(&net->netns_ids); spin_lock_init(&net->nsid_lock); list_for_each_entry(ops, &pernet_list, list) { error = ops_init(ops, net); if (error < 0) goto out_undo; } out: return error; out_undo: /* Walk through the list backwards calling the exit functions * for the pernet modules whose init functions did not fail. 
 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

/* Per-netns defaults applied at namespace creation. */
static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
/* Charge/uncharge one net namespace against the user_ns ucounts limit. */
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

/* Allocate a zeroed struct net plus its initial net_generic array. */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

/* Drop the "passive" reference; frees the netns once it reaches zero. */
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

/* Create a new network namespace for CLONE_NEWNET, or just take a
 * reference on @old_net otherwise.  Returns the netns or an ERR_PTR. */
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/* Workqueue handler tearing down all namespaces queued on cleanup_list. */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		/* Retract every nsid other namespaces assigned to us. */
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	mutex_lock(&net_mutex);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

/* Last reference dropped: queue the namespace for asynchronous teardown. */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/* Resolve an open /proc/<pid>/ns/net fd to its netns (with a reference). */
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

/* Resolve a (virtual) pid to that task's netns (with a reference). */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/* Allocate the proc inode number backing /proc/<pid>/ns/net. */
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

/* RTM_NEWNSID handler: assign the requested nsid to a peer netns
 * identified by pid or fd. */
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return
NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

/* Build a RTM_NEWNSID-style message carrying @nsid into @skb. */
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* RTM_GETNSID handler: report the nsid (if any) of a peer identified by
 * pid or fd back to the requester. */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

/* idr_for_each() callback used by the nsid dump: emit one entry,
 * skipping those already delivered in a previous dump pass. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

/* Dump all nsids known to the requesting netns. */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

/* Multicast a RTM_NEWNSID/RTM_DELNSID notification for @id on RTNLGRP_NSID. */
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

/* Boot-time setup of the initial network namespace and rtnetlink hooks. */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
/* Add @ops to @list and run its init for every existing namespace,
 * rolling back on failure. */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

/* Remove @ops and run its exit/free for every existing namespace. */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

/* Reserve a net_generic slot id for @ops (if it wants one), then hook it
 * into the pernet list; undoes the id reservation on failure. */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids,
					  MIN_PERNET_OPS_ID,
					  ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace. Allowing kernel
 * modules to have a race free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
/* proc_ns ->get: grab a reference to @task's netns. */
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

/* proc_ns ->install (setns): switch @nsproxy to @ns after a capability check
 * against both the target netns owner and the caller's user namespace. */
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
./CrossVul/dataset_final_sorted/CWE-416/c/bad_2843_0
crossvul-cpp_data_good_574_0
/*
 * linux/drivers/block/loop.c
 *
 * Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);

static int max_part;
static int part_shift;

/* Legacy XOR "encryption" transfer: XOR each byte with the key, keyed by
 * byte position within a 512-byte sector. */
static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}

/* Reject an XOR setup without a key. */
static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};

/* Size (in 512-byte sectors) the loop device exposes, given the backing
 * file size, the configured offset and an optional size limit. */
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}

/* Switch the device between buffered and direct I/O, when the backing
 * file and alignment permit the latter. */
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of backing device, and the logical block
	 * size of loop is bigger than the backing device's and the loop
	 * needn't transform transfer.
	 *
	 * TODO: the above condition may be loosed in the future, and
	 * direct I/O may be switched runtime at that time because most
	 * of requests in sane applications should be PAGE_SIZE aligned
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
				!(lo->lo_offset & dio_align) &&
				mapping->a_ops->direct_IO &&
				!lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
	 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
	 * will get updated by ioctl(LOOP_GET_STATUS)
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio) {
		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	} else {
		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	}
	blk_mq_unfreeze_queue(lo->lo_queue);
}

/* Recompute and publish the device capacity for new offset/sizelimit. */
static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
	sector_t x = (sector_t)size;
	struct block_device *bdev = lo->lo_device;

	/* reject sizes that do not fit in sector_t */
	if (unlikely((loff_t)x != size))
		return -EFBIG;
	if (lo->lo_offset != offset)
		lo->lo_offset = offset;
	if (lo->lo_sizelimit != sizelimit)
		lo->lo_sizelimit = sizelimit;
	set_capacity(lo->lo_disk, x);
	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
	/* let user-space know about the new size */
	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	return 0;
}

/* Invoke the configured transfer (encryption) callback, logging failures. */
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}

/* Write one bio_vec to the backing file; short writes become -EIO. */
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos, 0);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}

/* Untransformed write path: copy each request segment straight through. */
static int lo_write_simple(struct loop_device *lo, struct request *rq,
			   loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
			     loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}

/* Untransformed read path; a short read zero-fills the remaining bios. */
static int lo_read_simple(struct loop_device *lo, struct request *rq,
			  loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

/* Transforming read path: read into a bounce page, then run the transfer
 * callback into the request's pages. */
static int lo_read_transfer(struct loop_device *lo, struct request *rq,
			    loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}

static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
{
	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	struct file *file = lo->lo_backing_file;
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	int ret;

	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
 out:
	return ret;
}

/* REQ_OP_FLUSH: fsync the backing file. */
static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}

/* blk-mq completion: zero-fill the unread tail of a short aio read, then
 * end the request with the stored status. */
static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
		struct bio *bio = cmd->rq->bio;

		bio_advance(bio, cmd->ret);
		zero_fill_bio(bio);
	}

	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}

/* Drop one ref on the aio command; the last ref frees the bvec copy and
 * completes the blk-mq request. */
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
	if (!atomic_dec_and_test(&cmd->ref))
		return;
	kfree(cmd->bvec);
	cmd->bvec = NULL;
	blk_mq_complete_request(cmd->rq);
}

/* kiocb completion callback for the direct-I/O path. */
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	if (cmd->css)
		css_put(cmd->css);
	cmd->ret = ret;
	lo_rw_aio_do_completion(cmd);
}

/* Submit the request to the backing file as async direct I/O. */
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct bio_vec *bvec;
	struct request *rq = cmd->rq;
	struct bio *bio = rq->bio;
	struct file *file = lo->lo_backing_file;
	unsigned int offset;
	int segments = 0;
	int ret;

	if (rq->bio != rq->biotail) {
		struct req_iterator iter;
		struct bio_vec tmp;

		__rq_for_each_bio(bio, rq)
			segments += bio_segments(bio);
		bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO);
		if (!bvec)
			return -EIO;
		cmd->bvec = bvec;

		/*
		 * The bios of the request may be started from the middle of
		 * the 'bvec' because of bio splitting, so we can't directly
		 * copy bio->bi_iov_vec to new bvec.
The rq_for_each_segment * API will take care of all details for us. */ rq_for_each_segment(tmp, rq, iter) { *bvec = tmp; bvec++; } bvec = cmd->bvec; offset = 0; } else { /* * Same here, this bio may be started from the middle of the * 'bvec' because of bio splitting, so offset from the bvec * must be passed to iov iterator */ offset = bio->bi_iter.bi_bvec_done; bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); segments = bio_segments(bio); } atomic_set(&cmd->ref, 2); iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, segments, blk_rq_bytes(rq)); iter.iov_offset = offset; cmd->iocb.ki_pos = pos; cmd->iocb.ki_filp = file; cmd->iocb.ki_complete = lo_rw_aio_complete; cmd->iocb.ki_flags = IOCB_DIRECT; if (cmd->css) kthread_associate_blkcg(cmd->css); if (rw == WRITE) ret = call_write_iter(file, &cmd->iocb, &iter); else ret = call_read_iter(file, &cmd->iocb, &iter); lo_rw_aio_do_completion(cmd); kthread_associate_blkcg(NULL); if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0); return 0; } static int do_req_filebacked(struct loop_device *lo, struct request *rq) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; /* * lo_write_simple and lo_read_simple should have been covered * by io submit style function like lo_rw_aio(), one blocker * is that lo_read_simple() need to call flush_dcache_page after * the page is written from kernel, and it isn't easy to handle * this in io submit style function which submits all segments * of the req at one time. And direct read IO doesn't need to * run flush_dcache_page(). 
*/ switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: return lo_discard(lo, rq, pos); case REQ_OP_WRITE: if (lo->transfer) return lo_write_transfer(lo, rq, pos); else if (cmd->use_aio) return lo_rw_aio(lo, cmd, pos, WRITE); else return lo_write_simple(lo, rq, pos); case REQ_OP_READ: if (lo->transfer) return lo_read_transfer(lo, rq, pos); else if (cmd->use_aio) return lo_rw_aio(lo, cmd, pos, READ); else return lo_read_simple(lo, rq, pos); default: WARN_ON_ONCE(1); return -EIO; break; } } static inline void loop_update_dio(struct loop_device *lo) { __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) | lo->use_dio); } static void loop_reread_partitions(struct loop_device *lo, struct block_device *bdev) { int rc; /* * bd_mutex has been held already in release path, so don't * acquire it if this function is called in such case. * * If the reread partition isn't from release path, lo_refcnt * must be at least one and it can only become zero when the * current holder is released. */ if (!atomic_read(&lo->lo_refcnt)) rc = __blkdev_reread_part(bdev); else rc = blkdev_reread_part(bdev); if (rc) pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", __func__, lo->lo_number, lo->lo_file_name, rc); } /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up * the original file and in High Availability environments to switch to * an alternative location for the content in case of server meltdown. * This can only work if the loop device is used read-only, and if the * new backing store is the same size and type as the old backing store. 
*/ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { struct file *file, *old_file; struct inode *inode; int error; error = -ENXIO; if (lo->lo_state != Lo_bound) goto out; /* the loop device has to be read-only */ error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) goto out; error = -EBADF; file = fget(arg); if (!file) goto out; inode = file->f_mapping->host; old_file = lo->lo_backing_file; error = -EINVAL; if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) goto out_putf; /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) goto out_putf; /* and ... switch */ blk_mq_freeze_queue(lo->lo_queue); mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); lo->lo_backing_file = file; lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); mapping_set_gfp_mask(file->f_mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue); fput(old_file); if (lo->lo_flags & LO_FLAGS_PARTSCAN) loop_reread_partitions(lo, bdev); return 0; out_putf: fput(file); out: return error; } static inline int is_loop_device(struct file *file) { struct inode *i = file->f_mapping->host; return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; } /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, ssize_t (*callback)(struct loop_device *, char *)) { struct gendisk *disk = dev_to_disk(dev); struct loop_device *lo = disk->private_data; return callback(lo, page); } #define LOOP_ATTR_RO(_name) \ static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ static ssize_t loop_attr_do_show_##_name(struct device *d, \ struct device_attribute *attr, char *b) \ { \ return loop_attr_show(d, b, loop_attr_##_name##_show); \ } \ static struct device_attribute loop_attr_##_name = \ __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); static ssize_t loop_attr_backing_file_show(struct 
loop_device *lo, char *buf) { ssize_t ret; char *p = NULL; spin_lock_irq(&lo->lo_lock); if (lo->lo_backing_file) p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); spin_unlock_irq(&lo->lo_lock); if (IS_ERR_OR_NULL(p)) ret = PTR_ERR(p); else { ret = strlen(p); memmove(buf, p, ret); buf[ret++] = '\n'; buf[ret] = 0; } return ret; } static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); } static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); } static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) { int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); return sprintf(buf, "%s\n", autoclear ? "1" : "0"); } static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) { int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); return sprintf(buf, "%s\n", partscan ? "1" : "0"); } static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) { int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); return sprintf(buf, "%s\n", dio ? 
"1" : "0"); } LOOP_ATTR_RO(backing_file); LOOP_ATTR_RO(offset); LOOP_ATTR_RO(sizelimit); LOOP_ATTR_RO(autoclear); LOOP_ATTR_RO(partscan); LOOP_ATTR_RO(dio); static struct attribute *loop_attrs[] = { &loop_attr_backing_file.attr, &loop_attr_offset.attr, &loop_attr_sizelimit.attr, &loop_attr_autoclear.attr, &loop_attr_partscan.attr, &loop_attr_dio.attr, NULL, }; static struct attribute_group loop_attribute_group = { .name = "loop", .attrs= loop_attrs, }; static int loop_sysfs_init(struct loop_device *lo) { return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_config_discard(struct loop_device *lo) { struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. However we do not support discard if * encryption is enabled, because it may give an attacker * useful information. 
*/ if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); return; } q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, UINT_MAX >> 9); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) { kthread_flush_worker(&lo->worker); kthread_stop(lo->worker_task); } static int loop_kthread_worker_fn(void *worker_ptr) { current->flags |= PF_LESS_THROTTLE; return kthread_worker_fn(worker_ptr); } static int loop_prepare_queue(struct loop_device *lo) { kthread_init_worker(&lo->worker); lo->worker_task = kthread_run(loop_kthread_worker_fn, &lo->worker, "loop%d", lo->lo_number); if (IS_ERR(lo->worker_task)) return -ENOMEM; set_user_nice(lo->worker_task, MIN_NICE); return 0; } static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { struct file *file, *f; struct inode *inode; struct address_space *mapping; int lo_flags = 0; int error; loff_t size; /* This is safe, since we have a reference from open(). 
*/ __module_get(THIS_MODULE); error = -EBADF; file = fget(arg); if (!file) goto out; error = -EBUSY; if (lo->lo_state != Lo_unbound) goto out_putf; /* Avoid recursion */ f = file; while (is_loop_device(f)) { struct loop_device *l; if (f->f_mapping->host->i_bdev == bdev) goto out_putf; l = f->f_mapping->host->i_bdev->bd_disk->private_data; if (l->lo_state == Lo_unbound) { error = -EINVAL; goto out_putf; } f = l->lo_backing_file; } mapping = file->f_mapping; inode = mapping->host; error = -EINVAL; if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) goto out_putf; if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write_iter) lo_flags |= LO_FLAGS_READ_ONLY; error = -EFBIG; size = get_loop_size(lo, file); if ((loff_t)(sector_t)size != size) goto out_putf; error = loop_prepare_queue(lo); if (error) goto out_putf; error = 0; set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); lo->use_dio = false; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; lo->transfer = NULL; lo->ioctl = NULL; lo->lo_sizelimit = 0; lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_write_cache(lo->lo_queue, true, false); loop_update_dio(lo); set_capacity(lo->lo_disk, size); bd_set_size(bdev, size << 9); loop_sysfs_init(lo); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); set_blocksize(bdev, S_ISBLK(inode->i_mode) ? block_size(inode->i_bdev) : PAGE_SIZE); lo->lo_state = Lo_bound; if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; if (lo->lo_flags & LO_FLAGS_PARTSCAN) loop_reread_partitions(lo, bdev); /* Grab the block_device to prevent its destruction after we * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). */ bdgrab(bdev); return 0; out_putf: fput(file); out: /* This is safe: open() is still holding a reference. 
*/ module_put(THIS_MODULE); return error; } static int loop_release_xfer(struct loop_device *lo) { int err = 0; struct loop_func_table *xfer = lo->lo_encryption; if (xfer) { if (xfer->release) err = xfer->release(lo); lo->transfer = NULL; lo->lo_encryption = NULL; module_put(xfer->owner); } return err; } static int loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, const struct loop_info64 *i) { int err = 0; if (xfer) { struct module *owner = xfer->owner; if (!try_module_get(owner)) return -EINVAL; if (xfer->init) err = xfer->init(lo, i); if (err) module_put(owner); else lo->lo_encryption = xfer; } return err; } static int loop_clr_fd(struct loop_device *lo) { struct file *filp = lo->lo_backing_file; gfp_t gfp = lo->old_gfp_mask; struct block_device *bdev = lo->lo_device; if (lo->lo_state != Lo_bound) return -ENXIO; /* * If we've explicitly asked to tear down the loop device, * and it has an elevated reference count, set it for auto-teardown when * the last reference goes away. This stops $!~#$@ udev from * preventing teardown because it decided that it needs to run blkid on * the loopback device whenever they appear. xfstests is notorious for * failing tests because blkid via udev races with a losetup * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d * command to fail with EBUSY. 
*/ if (atomic_read(&lo->lo_refcnt) > 1) { lo->lo_flags |= LO_FLAGS_AUTOCLEAR; mutex_unlock(&lo->lo_ctl_mutex); return 0; } if (filp == NULL) return -EINVAL; /* freeze request queue during the transition */ blk_mq_freeze_queue(lo->lo_queue); spin_lock_irq(&lo->lo_lock); lo->lo_state = Lo_rundown; lo->lo_backing_file = NULL; spin_unlock_irq(&lo->lo_lock); loop_release_xfer(lo); lo->transfer = NULL; lo->ioctl = NULL; lo->lo_device = NULL; lo->lo_encryption = NULL; lo->lo_offset = 0; lo->lo_sizelimit = 0; lo->lo_encrypt_key_size = 0; memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); blk_queue_logical_block_size(lo->lo_queue, 512); blk_queue_physical_block_size(lo->lo_queue, 512); blk_queue_io_min(lo->lo_queue, 512); if (bdev) { bdput(bdev); invalidate_bdev(bdev); } set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { bd_set_size(bdev, 0); /* let user-space know about this change */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); } mapping_set_gfp_mask(filp->f_mapping, gfp); lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) loop_reread_partitions(lo, bdev); lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; loop_unprepare_queue(lo); mutex_unlock(&lo->lo_ctl_mutex); /* * Need not hold lo_ctl_mutex to fput backing file. * Calling fput holding lo_ctl_mutex triggers a circular * lock dependency possibility warning as fput can take * bd_mutex which is usually taken before lo_ctl_mutex. 
*/ fput(filp); return 0; } static int loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; struct loop_func_table *xfer; kuid_t uid = current_uid(); if (lo->lo_encrypt_key_size && !uid_eq(lo->lo_key_owner, uid) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (lo->lo_state != Lo_bound) return -ENXIO; if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) return -EINVAL; /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); err = loop_release_xfer(lo); if (err) goto exit; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; if (type >= MAX_LO_CRYPT) return -EINVAL; xfer = xfer_funcs[type]; if (xfer == NULL) return -EINVAL; } else xfer = NULL; err = loop_init_xfer(lo, xfer, info); if (err) goto exit; if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { err = -EFBIG; goto exit; } } loop_config_discard(lo); memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; if (!xfer) xfer = &none_funcs; lo->transfer = xfer->transfer; lo->ioctl = xfer->ioctl; if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != (info->lo_flags & LO_FLAGS_AUTOCLEAR)) lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; lo->lo_encrypt_key_size = info->lo_encrypt_key_size; lo->lo_init[0] = info->lo_init[0]; lo->lo_init[1] = info->lo_init[1]; if (info->lo_encrypt_key_size) { memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, info->lo_encrypt_key_size); lo->lo_key_owner = uid; } /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); exit: blk_mq_unfreeze_queue(lo->lo_queue); if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_flags |= LO_FLAGS_PARTSCAN; lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; loop_reread_partitions(lo, 
lo->lo_device); } return err; } static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { struct file *file = lo->lo_backing_file; struct kstat stat; int error; if (lo->lo_state != Lo_bound) return -ENXIO; error = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); if (error) return error; memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; info->lo_device = huge_encode_dev(stat.dev); info->lo_inode = stat.ino; info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); info->lo_encrypt_type = lo->lo_encryption ? lo->lo_encryption->number : 0; if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { info->lo_encrypt_key_size = lo->lo_encrypt_key_size; memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); } return 0; } static void loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) { memset(info64, 0, sizeof(*info64)); info64->lo_number = info->lo_number; info64->lo_device = info->lo_device; info64->lo_inode = info->lo_inode; info64->lo_rdevice = info->lo_rdevice; info64->lo_offset = info->lo_offset; info64->lo_sizelimit = 0; info64->lo_encrypt_type = info->lo_encrypt_type; info64->lo_encrypt_key_size = info->lo_encrypt_key_size; info64->lo_flags = info->lo_flags; info64->lo_init[0] = info->lo_init[0]; info64->lo_init[1] = info->lo_init[1]; if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); else memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); } static int loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) { memset(info, 0, sizeof(*info)); info->lo_number = info64->lo_number; 
	/* continuation of loop_info64_to_old(): copy the narrowable fields */
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	/* the old struct has one shared name field for crypto vs. file name */
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}

/*
 * LOOP_SET_STATUS (legacy struct loop_info): copy from userspace, widen
 * to loop_info64 and apply via loop_set_status().
 */
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

/* LOOP_SET_STATUS64: copy the native 64-bit struct and apply it. */
static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

/*
 * LOOP_GET_STATUS (legacy): snapshot the device state, narrow it to the
 * old struct (may fail with -EOVERFLOW) and copy it to userspace.
 */
static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

/* LOOP_GET_STATUS64: snapshot the device state and copy it out as-is. */
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err = 0;

	if (!arg)
		err = -EINVAL;
	if (!err)
		err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;
	return err;
}

/*
 * LOOP_SET_CAPACITY: re-read the backing file size and republish the
 * device capacity (used after the backing file grew/shrank).
 */
static int loop_set_capacity(struct loop_device *lo)
{
	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}

/*
 * LOOP_SET_DIRECT_IO: request direct-I/O mode on (arg != 0) or off.
 * __loop_update_dio() may refuse (alignment, crypto, missing direct_IO
 * a_ops); -EINVAL is returned if the requested mode did not take effect.
 */
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;

	error = -EINVAL;
 out:
	return error;
}

/*
 * LOOP_SET_BLOCK_SIZE: change the queue's logical/physical block size.
 * Only power-of-two sizes between 512 and PAGE_SIZE are accepted; the
 * queue is frozen while the limits change so no I/O sees a mixed state.
 */
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
		return -EINVAL;

	blk_mq_freeze_queue(lo->lo_queue);

	blk_queue_logical_block_size(lo->lo_queue, arg);
	blk_queue_physical_block_size(lo->lo_queue, arg);
	blk_queue_io_min(lo->lo_queue, arg);

	/* block size affects direct-I/O eligibility; re-evaluate it */
	loop_update_dio(lo);

	blk_mq_unfreeze_queue(lo->lo_queue);

	return 0;
}

/*
 * lo_ioctl - loop device ioctl dispatcher.  Serialized by lo_ctl_mutex
 * (taken with nesting level 1 because loop-over-loop stacking can nest
 * the mutex class).  State-changing ioctls require the fd to be opened
 * for write or CAP_SYS_ADMIN; the GET_STATUS variants are unprivileged.
 */
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
	switch (cmd) {
	case LOOP_SET_FD:
		err = loop_set_fd(lo, mode, bdev, arg);
		break;
	case LOOP_CHANGE_FD:
		err = loop_change_fd(lo, bdev, arg);
		break;
	case LOOP_CLR_FD:
		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
		err = loop_clr_fd(lo);
		if (!err)
			goto out_unlocked;
		break;
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status_old(lo,
					(struct loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
		break;
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_status64(lo,
					(struct loop_info64 __user *) arg);
		break;
	case LOOP_GET_STATUS64:
		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
		break;
	case LOOP_SET_CAPACITY:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = loop_set_capacity(lo);
		break;
	case
LOOP_SET_DIRECT_IO: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_dio(lo, arg); break; case LOOP_SET_BLOCK_SIZE: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_block_size(lo, arg); break; default: err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; } mutex_unlock(&lo->lo_ctl_mutex); out_unlocked: return err; } #ifdef CONFIG_COMPAT struct compat_loop_info { compat_int_t lo_number; /* ioctl r/o */ compat_dev_t lo_device; /* ioctl r/o */ compat_ulong_t lo_inode; /* ioctl r/o */ compat_dev_t lo_rdevice; /* ioctl r/o */ compat_int_t lo_offset; compat_int_t lo_encrypt_type; compat_int_t lo_encrypt_key_size; /* ioctl w/o */ compat_int_t lo_flags; /* ioctl r/o */ char lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ compat_ulong_t lo_init[2]; char reserved[4]; }; /* * Transfer 32-bit compatibility structure in userspace to 64-bit loop info * - noinlined to reduce stack space usage in main part of driver */ static noinline int loop_info64_from_compat(const struct compat_loop_info __user *arg, struct loop_info64 *info64) { struct compat_loop_info info; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; memset(info64, 0, sizeof(*info64)); info64->lo_number = info.lo_number; info64->lo_device = info.lo_device; info64->lo_inode = info.lo_inode; info64->lo_rdevice = info.lo_rdevice; info64->lo_offset = info.lo_offset; info64->lo_sizelimit = 0; info64->lo_encrypt_type = info.lo_encrypt_type; info64->lo_encrypt_key_size = info.lo_encrypt_key_size; info64->lo_flags = info.lo_flags; info64->lo_init[0] = info.lo_init[0]; info64->lo_init[1] = info.lo_init[1]; if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); else memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); return 0; } /* * Transfer 64-bit loop info to 32-bit compatibility structure in 
userspace * - noinlined to reduce stack space usage in main part of driver */ static noinline int loop_info64_to_compat(const struct loop_info64 *info64, struct compat_loop_info __user *arg) { struct compat_loop_info info; memset(&info, 0, sizeof(info)); info.lo_number = info64->lo_number; info.lo_device = info64->lo_device; info.lo_inode = info64->lo_inode; info.lo_rdevice = info64->lo_rdevice; info.lo_offset = info64->lo_offset; info.lo_encrypt_type = info64->lo_encrypt_type; info.lo_encrypt_key_size = info64->lo_encrypt_key_size; info.lo_flags = info64->lo_flags; info.lo_init[0] = info64->lo_init[0]; info.lo_init[1] = info64->lo_init[1]; if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); else memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); /* error in case values were truncated */ if (info.lo_device != info64->lo_device || info.lo_rdevice != info64->lo_rdevice || info.lo_inode != info64->lo_inode || info.lo_offset != info64->lo_offset || info.lo_init[0] != info64->lo_init[0] || info.lo_init[1] != info64->lo_init[1]) return -EOVERFLOW; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int loop_set_status_compat(struct loop_device *lo, const struct compat_loop_info __user *arg) { struct loop_info64 info64; int ret; ret = loop_info64_from_compat(arg, &info64); if (ret < 0) return ret; return loop_set_status(lo, &info64); } static int loop_get_status_compat(struct loop_device *lo, struct compat_loop_info __user *arg) { struct loop_info64 info64; int err = 0; if (!arg) err = -EINVAL; if (!err) err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); return err; } static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; int err; switch(cmd) { case LOOP_SET_STATUS: 
mutex_lock(&lo->lo_ctl_mutex); err = loop_set_status_compat( lo, (const struct compat_loop_info __user *) arg); mutex_unlock(&lo->lo_ctl_mutex); break; case LOOP_GET_STATUS: mutex_lock(&lo->lo_ctl_mutex); err = loop_get_status_compat( lo, (struct compat_loop_info __user *) arg); mutex_unlock(&lo->lo_ctl_mutex); break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: arg = (unsigned long) compat_ptr(arg); case LOOP_SET_FD: case LOOP_CHANGE_FD: err = lo_ioctl(bdev, mode, cmd, arg); break; default: err = -ENOIOCTLCMD; break; } return err; } #endif static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo; int err = 0; mutex_lock(&loop_index_mutex); lo = bdev->bd_disk->private_data; if (!lo) { err = -ENXIO; goto out; } atomic_inc(&lo->lo_refcnt); out: mutex_unlock(&loop_index_mutex); return err; } static void __lo_release(struct loop_device *lo) { int err; if (atomic_dec_return(&lo->lo_refcnt)) return; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { /* * In autoclear mode, stop the loop thread * and remove configuration after last close. */ err = loop_clr_fd(lo); if (!err) return; } else if (lo->lo_state == Lo_bound) { /* * Otherwise keep thread (if running) and config, * but flush possible ongoing bios in thread. */ blk_mq_freeze_queue(lo->lo_queue); blk_mq_unfreeze_queue(lo->lo_queue); } mutex_unlock(&lo->lo_ctl_mutex); } static void lo_release(struct gendisk *disk, fmode_t mode) { mutex_lock(&loop_index_mutex); __lo_release(disk->private_data); mutex_unlock(&loop_index_mutex); } static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, .release = lo_release, .ioctl = lo_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lo_compat_ioctl, #endif }; /* * And now the modules code and kernel interface. 
*/ static int max_loop; module_param(max_loop, int, S_IRUGO); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); module_param(max_part, int, S_IRUGO); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); int loop_register_transfer(struct loop_func_table *funcs) { unsigned int n = funcs->number; if (n >= MAX_LO_CRYPT || xfer_funcs[n]) return -EINVAL; xfer_funcs[n] = funcs; return 0; } static int unregister_transfer_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; struct loop_func_table *xfer = data; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); mutex_unlock(&lo->lo_ctl_mutex); return 0; } int loop_unregister_transfer(int number) { unsigned int n = number; struct loop_func_table *xfer; if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) return -EINVAL; xfer_funcs[n] = NULL; idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); return 0; } EXPORT_SYMBOL(loop_register_transfer); EXPORT_SYMBOL(loop_unregister_transfer); static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); struct loop_device *lo = cmd->rq->q->queuedata; blk_mq_start_request(bd->rq); if (lo->lo_state != Lo_bound) return BLK_STS_IOERR; switch (req_op(cmd->rq)) { case REQ_OP_FLUSH: case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: cmd->use_aio = false; break; default: cmd->use_aio = lo->use_dio; break; } /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { cmd->css = cmd->rq->bio->bi_css; css_get(cmd->css); } else #endif cmd->css = NULL; kthread_queue_work(&lo->worker, &cmd->work); return BLK_STS_OK; } static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = op_is_write(req_op(cmd->rq)); struct loop_device *lo = cmd->rq->q->queuedata; int ret = 0; if (write && 
(lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; goto failed; } ret = do_req_filebacked(lo, cmd->rq); failed: /* complete non-aio request */ if (!cmd->use_aio || ret) { cmd->ret = ret ? -EIO : 0; blk_mq_complete_request(cmd->rq); } } static void loop_queue_work(struct kthread_work *work) { struct loop_cmd *cmd = container_of(work, struct loop_cmd, work); loop_handle_cmd(cmd); } static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->rq = rq; kthread_init_work(&cmd->work, loop_queue_work); return 0; } static const struct blk_mq_ops loop_mq_ops = { .queue_rq = loop_queue_rq, .init_request = loop_init_request, .complete = lo_complete_rq, }; static int loop_add(struct loop_device **l, int i) { struct loop_device *lo; struct gendisk *disk; int err; err = -ENOMEM; lo = kzalloc(sizeof(*lo), GFP_KERNEL); if (!lo) goto out; lo->lo_state = Lo_unbound; /* allocate id, if @id >= 0, we're requesting that specific id */ if (i >= 0) { err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); if (err == -ENOSPC) err = -EEXIST; } else { err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); } if (err < 0) goto out_free_dev; i = err; err = -ENOMEM; lo->tag_set.ops = &loop_mq_ops; lo->tag_set.nr_hw_queues = 1; lo->tag_set.queue_depth = 128; lo->tag_set.numa_node = NUMA_NO_NODE; lo->tag_set.cmd_size = sizeof(struct loop_cmd); lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; lo->tag_set.driver_data = lo; err = blk_mq_alloc_tag_set(&lo->tag_set); if (err) goto out_free_idr; lo->lo_queue = blk_mq_init_queue(&lo->tag_set); if (IS_ERR_OR_NULL(lo->lo_queue)) { err = PTR_ERR(lo->lo_queue); goto out_cleanup_tags; } lo->lo_queue->queuedata = lo; blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS); /* * By default, we do buffer IO, so it doesn't make sense to enable * merge because the I/O submitted to backing file is handled page by * page. 
For directio mode, merge does help to dispatch bigger request * to underlayer disk. We will enable merge once directio is enabled. */ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); err = -ENOMEM; disk = lo->lo_disk = alloc_disk(1 << part_shift); if (!disk) goto out_free_queue; /* * Disable partition scanning by default. The in-kernel partition * scanning can be requested individually per-device during its * setup. Userspace can always add and remove partitions from all * devices. The needed partition minors are allocated from the * extended minor space, the main loop device numbers will continue * to match the loop minors, regardless of the number of partitions * used. * * If max_part is given, partition scanning is globally enabled for * all loop devices. The minors for the main loop devices will be * multiples of max_part. * * Note: Global-for-all-devices, set-only-at-init, read-only module * parameteters like 'max_loop' and 'max_part' make things needlessly * complicated, are too static, inflexible and may surprise * userspace tools. Parameters like this in general should be avoided. 
*/ if (!part_shift) disk->flags |= GENHD_FL_NO_PART_SCAN; disk->flags |= GENHD_FL_EXT_DEVT; mutex_init(&lo->lo_ctl_mutex); atomic_set(&lo->lo_refcnt, 0); lo->lo_number = i; spin_lock_init(&lo->lo_lock); disk->major = LOOP_MAJOR; disk->first_minor = i << part_shift; disk->fops = &lo_fops; disk->private_data = lo; disk->queue = lo->lo_queue; sprintf(disk->disk_name, "loop%d", i); add_disk(disk); *l = lo; return lo->lo_number; out_free_queue: blk_cleanup_queue(lo->lo_queue); out_cleanup_tags: blk_mq_free_tag_set(&lo->tag_set); out_free_idr: idr_remove(&loop_index_idr, i); out_free_dev: kfree(lo); out: return err; } static void loop_remove(struct loop_device *lo) { blk_cleanup_queue(lo->lo_queue); del_gendisk(lo->lo_disk); blk_mq_free_tag_set(&lo->tag_set); put_disk(lo->lo_disk); kfree(lo); } static int find_free_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; struct loop_device **l = data; if (lo->lo_state == Lo_unbound) { *l = lo; return 1; } return 0; } static int loop_lookup(struct loop_device **l, int i) { struct loop_device *lo; int ret = -ENODEV; if (i < 0) { int err; err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); if (err == 1) { *l = lo; ret = lo->lo_number; } goto out; } /* lookup and return a specific i */ lo = idr_find(&loop_index_idr, i); if (lo) { *l = lo; ret = lo->lo_number; } out: return ret; } static struct kobject *loop_probe(dev_t dev, int *part, void *data) { struct loop_device *lo; struct kobject *kobj; int err; mutex_lock(&loop_index_mutex); err = loop_lookup(&lo, MINOR(dev) >> part_shift); if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); if (err < 0) kobj = NULL; else kobj = get_disk(lo->lo_disk); mutex_unlock(&loop_index_mutex); *part = 0; return kobj; } static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { struct loop_device *lo; int ret = -ENOSYS; mutex_lock(&loop_index_mutex); switch (cmd) { case LOOP_CTL_ADD: ret = loop_lookup(&lo, parm); if (ret >= 0) { ret = 
-EEXIST; break; } ret = loop_add(&lo, parm); break; case LOOP_CTL_REMOVE: ret = loop_lookup(&lo, parm); if (ret < 0) break; mutex_lock(&lo->lo_ctl_mutex); if (lo->lo_state != Lo_unbound) { ret = -EBUSY; mutex_unlock(&lo->lo_ctl_mutex); break; } if (atomic_read(&lo->lo_refcnt) > 0) { ret = -EBUSY; mutex_unlock(&lo->lo_ctl_mutex); break; } lo->lo_disk->private_data = NULL; mutex_unlock(&lo->lo_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); loop_remove(lo); break; case LOOP_CTL_GET_FREE: ret = loop_lookup(&lo, -1); if (ret >= 0) break; ret = loop_add(&lo, -1); } mutex_unlock(&loop_index_mutex); return ret; } static const struct file_operations loop_ctl_fops = { .open = nonseekable_open, .unlocked_ioctl = loop_control_ioctl, .compat_ioctl = loop_control_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice loop_misc = { .minor = LOOP_CTRL_MINOR, .name = "loop-control", .fops = &loop_ctl_fops, }; MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); MODULE_ALIAS("devname:loop-control"); static int __init loop_init(void) { int i, nr; unsigned long range; struct loop_device *lo; int err; part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user can decide correct minor number * if [s]he want to create more devices. * * Note that -1 is required because partition 0 is reserved * for the whole disk. */ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) { err = -EINVAL; goto err_out; } if (max_loop > 1UL << (MINORBITS - part_shift)) { err = -EINVAL; goto err_out; } /* * If max_loop is specified, create that many devices upfront. * This also becomes a hard limit. If max_loop is not specified, * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module * init time. Loop devices can be requested on-demand with the * /dev/loop-control interface, or be instantiated by accessing * a 'dead' device node. 
*/ if (max_loop) { nr = max_loop; range = max_loop << part_shift; } else { nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; range = 1UL << MINORBITS; } err = misc_register(&loop_misc); if (err < 0) goto err_out; if (register_blkdev(LOOP_MAJOR, "loop")) { err = -EIO; goto misc_out; } blk_register_region(MKDEV(LOOP_MAJOR, 0), range, THIS_MODULE, loop_probe, NULL, NULL); /* pre-create number of devices given by config or max_loop */ mutex_lock(&loop_index_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); mutex_unlock(&loop_index_mutex); printk(KERN_INFO "loop: module loaded\n"); return 0; misc_out: misc_deregister(&loop_misc); err_out: return err; } static int loop_exit_cb(int id, void *ptr, void *data) { struct loop_device *lo = ptr; loop_remove(lo); return 0; } static void __exit loop_exit(void) { unsigned long range; range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); } module_init(loop_init); module_exit(loop_exit); #ifndef MODULE static int __init max_loop_setup(char *str) { max_loop = simple_strtol(str, NULL, 0); return 1; } __setup("max_loop=", max_loop_setup); #endif
./CrossVul/dataset_final_sorted/CWE-416/c/good_574_0
crossvul-cpp_data_good_5354_0
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PACKET - implements raw packet sockets. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * Fixes: * Alan Cox : verify_area() now used correctly * Alan Cox : new skbuff lists, look ma no backlogs! * Alan Cox : tidied skbuff lists. * Alan Cox : Now uses generic datagram routines I * added. Also fixed the peek/read crash * from all old Linux datagram code. * Alan Cox : Uses the improved datagram code. * Alan Cox : Added NULL's for socket options. * Alan Cox : Re-commented the code. * Alan Cox : Use new kernel side addressing * Rob Janssen : Correct MTU usage. * Dave Platt : Counter leaks caused by incorrect * interrupt locking and some slightly * dubious gcc output. Can you read * compiler: it said _VOLATILE_ * Richard Kooijman : Timestamp fixes. * Alan Cox : New buffers. Use sk->mac.raw. * Alan Cox : sendmsg/recvmsg support. * Alan Cox : Protocol setting support * Alexey Kuznetsov : Untied from IPv4 stack. * Cyrus Durgin : Fixed kerneld for kmod. * Michal Ostrowski : Module initialization cleanup. * Ulises Alonso : Frame number limit removal and * packet_set_ring memory leak. * Eric Biederman : Allow for > 8 byte hardware addresses. * The convention is that longer addresses * will simply extend the hardware address * byte arrays at the end of sockaddr_ll * and packet_mreq. * Johann Baudy : Added TX RING. * Chetan Loke : Implemented TPACKET_V3 block abstraction * layer. * Copyright (C) 2011, <lokec@ccs.neu.edu> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* */ #include <linux/types.h> #include <linux/mm.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_packet.h> #include <linux/wireless.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/if_vlan.h> #include <linux/virtio_net.h> #include <linux/errqueue.h> #include <linux/net_tstamp.h> #include <linux/percpu.h> #ifdef CONFIG_INET #include <net/inet_common.h> #endif #include <linux/bpf.h> #include <net/compat.h> #include "internal.h" /* Assumptions: - if device has no dev->hard_header routine, it adds and removes ll header inside itself. In this case ll header is invisible outside of device, but higher levels still should reserve dev->hard_header_len. Some devices are enough clever to reallocate skb, when header will not fit to reserved space (tunnel), another ones are silly (PPP). - packet socket receives packets with pulled ll header, so that SOCK_RAW should push it back. On receive: ----------- Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely, that it points to ll header. PPP makes it, that is wrong, because introduce assymetry between rx and tx paths. data -> data Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! 
data -> data Resume If dev->hard_header==NULL we are unlikely to restore sensible ll header. On transmit: ------------ dev->hard_header != NULL mac_header -> ll header data -> ll header dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data We should set nh.raw on output to correct posistion, packet classifier depends on it. */ /* Private packet socket structures. */ /* identical to struct packet_mreq except it has * a longer address field. */ struct packet_mreq_max { int mr_ifindex; unsigned short mr_type; unsigned short mr_alen; unsigned char mr_address[MAX_ADDR_LEN]; }; union tpacket_uhdr { struct tpacket_hdr *h1; struct tpacket2_hdr *h2; struct tpacket3_hdr *h3; void *raw; }; static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, int closing, int tx_ring); #define V3_ALIGNMENT (8) #define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT)) #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) #define PGV_FROM_VMALLOC 1 #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len) #define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x))) struct packet_sock; static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); static void *packet_previous_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status); static void packet_increment_head(struct packet_ring_buffer *buff); static int prb_curr_blk_in_use(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void *prb_dispatch_next_block(struct tpacket_kbdq_core *, struct packet_sock *); static void 
prb_retire_current_block(struct tpacket_kbdq_core *, struct packet_sock *, unsigned int status); static int prb_queue_frozen(struct tpacket_kbdq_core *); static void prb_open_block(struct tpacket_kbdq_core *, struct tpacket_block_desc *); static void prb_retire_rx_blk_timer_expired(unsigned long); static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *); static void prb_init_blk_timer(struct packet_sock *, struct tpacket_kbdq_core *, void (*func) (unsigned long)); static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_clear_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void prb_fill_vlan_info(struct tpacket_kbdq_core *, struct tpacket3_hdr *); static void packet_flush_mclist(struct sock *sk); struct packet_skb_cb { union { struct sockaddr_pkt pkt; union { /* Trick: alias skb original length with * ll.sll_family and ll.protocol in order * to save room. */ unsigned int origlen; struct sockaddr_ll ll; }; } sa; }; #define vio_le() virtio_legacy_is_little_endian() #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) #define GET_PBLOCK_DESC(x, bid) \ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer)) #define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer)) #define GET_NEXT_PRB_BLK_NUM(x) \ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? 
\ ((x)->kactive_blk_num+1) : 0) static void __fanout_unlink(struct sock *sk, struct packet_sock *po); static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; skb = validate_xmit_skb_list(skb, dev); if (skb != orig_skb) goto drop; txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); if (!dev_xmit_complete(ret)) kfree_skb(skb); return ret; drop: atomic_long_inc(&dev->tx_dropped); kfree_skb_list(skb); return NET_XMIT_DROP; } static struct net_device *packet_cached_dev_get(struct packet_sock *po) { struct net_device *dev; rcu_read_lock(); dev = rcu_dereference(po->cached_dev); if (likely(dev)) dev_hold(dev); rcu_read_unlock(); return dev; } static void packet_cached_dev_assign(struct packet_sock *po, struct net_device *dev) { rcu_assign_pointer(po->cached_dev, dev); } static void packet_cached_dev_reset(struct packet_sock *po) { RCU_INIT_POINTER(po->cached_dev, NULL); } static bool packet_use_direct_xmit(const struct packet_sock *po) { return po->xmit == packet_direct_xmit; } static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; } static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) { const struct net_device_ops *ops = dev->netdev_ops; u16 queue_index; if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb, NULL, __packet_pick_tx_queue); queue_index = netdev_cap_txqueue(dev, queue_index); } else { queue_index = __packet_pick_tx_queue(dev, skb); } skb_set_queue_mapping(skb, queue_index); } /* 
register_prot_hook must be invoked with the po->bind_lock held, * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). */ static void register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); if (!po->running) { if (po->fanout) __fanout_link(sk, po); else dev_add_pack(&po->prot_hook); sock_hold(sk); po->running = 1; } } /* {,__}unregister_prot_hook() must be invoked with the po->bind_lock * held. If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. If the sync parameter is false, it is the * callers responsibility to take care of this. */ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); po->running = 0; if (po->fanout) __fanout_unlink(sk, po); else __dev_remove_pack(&po->prot_hook); __sock_put(sk); if (sync) { spin_unlock(&po->bind_lock); synchronize_net(); spin_lock(&po->bind_lock); } } static void unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); if (po->running) __unregister_prot_hook(sk, sync); } static inline struct page * __pure pgv_to_page(void *addr) { if (is_vmalloc_addr(addr)) return vmalloc_to_page(addr); return virt_to_page(addr); } static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_status = status; flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: h.h2->tp_status = status; flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); } smp_wmb(); } static int __packet_get_status(struct packet_sock *po, void *frame) { union tpacket_uhdr h; smp_rmb(); h.raw = frame; switch (po->tp_version) { case TPACKET_V1: 
flush_dcache_page(pgv_to_page(&h.h1->tp_status)); return h.h1->tp_status; case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); return h.h2->tp_status; case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); return 0; } } static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts, unsigned int flags) { struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); if (shhwtstamps && (flags & SOF_TIMESTAMPING_RAW_HARDWARE) && ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts)) return TP_STATUS_TS_RAW_HARDWARE; if (ktime_to_timespec_cond(skb->tstamp, ts)) return TP_STATUS_TS_SOFTWARE; return 0; } static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, struct sk_buff *skb) { union tpacket_uhdr h; struct timespec ts; __u32 ts_status; if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) return 0; h.raw = frame; switch (po->tp_version) { case TPACKET_V1: h.h1->tp_sec = ts.tv_sec; h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; break; case TPACKET_V2: h.h2->tp_sec = ts.tv_sec; h.h2->tp_nsec = ts.tv_nsec; break; case TPACKET_V3: default: WARN(1, "TPACKET version not supported.\n"); BUG(); } /* one flush is safe, as both fields always lie on the same cacheline */ flush_dcache_page(pgv_to_page(&h.h1->tp_sec)); smp_wmb(); return ts_status; } static void *packet_lookup_frame(struct packet_sock *po, struct packet_ring_buffer *rb, unsigned int position, int status) { unsigned int pg_vec_pos, frame_offset; union tpacket_uhdr h; pg_vec_pos = position / rb->frames_per_block; frame_offset = position % rb->frames_per_block; h.raw = rb->pg_vec[pg_vec_pos].buffer + (frame_offset * rb->frame_size); if (status != __packet_get_status(po, h.raw)) return NULL; return h.raw; } static void *packet_current_frame(struct packet_sock *po, struct packet_ring_buffer *rb, int status) { return packet_lookup_frame(po, rb, rb->head, status); } static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { 
del_timer_sync(&pkc->retire_blk_timer); } static void prb_shutdown_retire_blk_timer(struct packet_sock *po, struct sk_buff_head *rb_queue) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); spin_lock_bh(&rb_queue->lock); pkc->delete_blk_timer = 1; spin_unlock_bh(&rb_queue->lock); prb_del_retire_blk_timer(pkc); } static void prb_init_blk_timer(struct packet_sock *po, struct tpacket_kbdq_core *pkc, void (*func) (unsigned long)) { init_timer(&pkc->retire_blk_timer); pkc->retire_blk_timer.data = (long)po; pkc->retire_blk_timer.function = func; pkc->retire_blk_timer.expires = jiffies; } static void prb_setup_retire_blk_timer(struct packet_sock *po) { struct tpacket_kbdq_core *pkc; pkc = GET_PBDQC_FROM_RB(&po->rx_ring); prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); } static int prb_calc_retire_blk_tmo(struct packet_sock *po, int blk_size_in_bytes) { struct net_device *dev; unsigned int mbits = 0, msec = 0, div = 0, tmo = 0; struct ethtool_link_ksettings ecmd; int err; rtnl_lock(); dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); if (unlikely(!dev)) { rtnl_unlock(); return DEFAULT_PRB_RETIRE_TOV; } err = __ethtool_get_link_ksettings(dev, &ecmd); rtnl_unlock(); if (!err) { /* * If the link speed is so slow you don't really * need to worry about perf anyways */ if (ecmd.base.speed < SPEED_1000 || ecmd.base.speed == SPEED_UNKNOWN) { return DEFAULT_PRB_RETIRE_TOV; } else { msec = 1; div = ecmd.base.speed / 1000; } } mbits = (blk_size_in_bytes * 8) / (1024 * 1024); if (div) mbits /= div; tmo = mbits * msec; if (div) return tmo+1; return tmo; } static void prb_init_ft_ops(struct tpacket_kbdq_core *p1, union tpacket_req_u *req_u) { p1->feature_req_word = req_u->req3.tp_feature_req_word; } static void init_prb_bdqc(struct packet_sock *po, struct packet_ring_buffer *rb, struct pgv *pg_vec, union tpacket_req_u *req_u) { struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb); struct tpacket_block_desc *pbd; memset(p1, 0x0, sizeof(*p1)); 
p1->knxt_seq_num = 1; p1->pkbdq = pg_vec; pbd = (struct tpacket_block_desc *)pg_vec[0].buffer; p1->pkblk_start = pg_vec[0].buffer; p1->kblk_size = req_u->req3.tp_block_size; p1->knum_blocks = req_u->req3.tp_block_nr; p1->hdrlen = po->tp_hdrlen; p1->version = po->tp_version; p1->last_kactive_blk_num = 0; po->stats.stats3.tp_freeze_q_cnt = 0; if (req_u->req3.tp_retire_blk_tov) p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov; else p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); prb_setup_retire_blk_timer(po); prb_open_block(p1, pbd); } /* Do NOT update the last_blk_num first. * Assumes sk_buff_head lock is held. */ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc) { mod_timer(&pkc->retire_blk_timer, jiffies + pkc->tov_in_jiffies); pkc->last_kactive_blk_num = pkc->kactive_blk_num; } /* * Timer logic: * 1) We refresh the timer only when we open a block. * By doing this we don't waste cycles refreshing the timer * on packet-by-packet basis. * * With a 1MB block-size, on a 1Gbps line, it will take * i) ~8 ms to fill a block + ii) memcpy etc. * In this cut we are not accounting for the memcpy time. * * So, if the user sets the 'tmo' to 10ms then the timer * will never fire while the block is still getting filled * (which is what we want). However, the user could choose * to close a block early and that's fine. * * But when the timer does fire, we check whether or not to refresh it. * Since the tmo granularity is in msecs, it is not too expensive * to refresh the timer, lets say every '8' msecs. * Either the user can set the 'tmo' or we can derive it based on * a) line-speed and b) block-size. * prb_calc_retire_blk_tmo() calculates the tmo. 
* */ static void prb_retire_rx_blk_timer_expired(unsigned long data) { struct packet_sock *po = (struct packet_sock *)data; struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); unsigned int frozen; struct tpacket_block_desc *pbd; spin_lock(&po->sk.sk_receive_queue.lock); frozen = prb_queue_frozen(pkc); pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); if (unlikely(pkc->delete_blk_timer)) goto out; /* We only need to plug the race when the block is partially filled. * tpacket_rcv: * lock(); increment BLOCK_NUM_PKTS; unlock() * copy_bits() is in progress ... * timer fires on other cpu: * we can't retire the current block because copy_bits * is in progress. * */ if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ cpu_relax(); } } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { if (!BLOCK_NUM_PKTS(pbd)) { /* An empty block. Just refresh the timer. */ goto refresh_timer; } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; else goto out; } else { /* Case 1. Queue was frozen because user-space was * lagging behind. */ if (prb_curr_blk_in_use(pkc, pbd)) { /* * Ok, user-space is still behind. * So just refresh the timer. */ goto refresh_timer; } else { /* Case 2. queue was frozen,user-space caught up, * now the link went idle && the timer fired. * We don't have a block to close.So we open this * block and restart the timer. * opening a block thaws the queue,restarts timer * Thawing/timer-refresh is a side effect. 
*/ prb_open_block(pkc, pbd); goto out; } } } refresh_timer: _prb_refresh_rx_retire_blk_timer(pkc); out: spin_unlock(&po->sk.sk_receive_queue.lock); } static void prb_flush_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, __u32 status) { /* Flush everything minus the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 u8 *start, *end; start = (u8 *)pbd1; /* Skip the block header(we know header WILL fit in 4K) */ start += PAGE_SIZE; end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end); for (; start < end; start += PAGE_SIZE) flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif /* Now update the block status. */ BLOCK_STATUS(pbd1) = status; /* Flush the block header */ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 start = (u8 *)pbd1; flush_dcache_page(pgv_to_page(start)); smp_wmb(); #endif } /* * Side effect: * * 1) flush the block * 2) Increment active_blk_num * * Note:We DONT refresh the timer on purpose. * Because almost always the next block will be opened. */ static void prb_close_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1, struct packet_sock *po, unsigned int stat) { __u32 status = TP_STATUS_USER | stat; struct tpacket3_hdr *last_pkt; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; struct sock *sk = &po->sk; if (po->stats.stats3.tp_drops) status |= TP_STATUS_LOSING; last_pkt = (struct tpacket3_hdr *)pkc1->prev; last_pkt->tp_next_offset = 0; /* Get the ts of the last pkt */ if (BLOCK_NUM_PKTS(pbd1)) { h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { /* Ok, we tmo'd - so get the current time. * * It shouldn't really happen as we don't close empty * blocks. See prb_retire_rx_blk_timer_expired(). 
*/ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; h1->ts_last_pkt.ts_nsec = ts.tv_nsec; } smp_wmb(); /* Flush the block */ prb_flush_block(pkc1, pbd1, status); sk->sk_data_ready(sk); pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); } static void prb_thaw_queue(struct tpacket_kbdq_core *pkc) { pkc->reset_pending_on_curr_blk = 0; } /* * Side effect of opening a block: * * 1) prb_queue is thawed. * 2) retire_blk_timer is refreshed. * */ static void prb_open_block(struct tpacket_kbdq_core *pkc1, struct tpacket_block_desc *pbd1) { struct timespec ts; struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; smp_rmb(); /* We could have just memset this but we will lose the * flexibility of making the priv area sticky */ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++; BLOCK_NUM_PKTS(pbd1) = 0; BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); getnstimeofday(&ts); h1->ts_first_pkt.ts_sec = ts.tv_sec; h1->ts_first_pkt.ts_nsec = ts.tv_nsec; pkc1->pkblk_start = (char *)pbd1; pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv); BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN; pbd1->version = pkc1->version; pkc1->prev = pkc1->nxt_offset; pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size; prb_thaw_queue(pkc1); _prb_refresh_rx_retire_blk_timer(pkc1); smp_wmb(); } /* * Queue freeze logic: * 1) Assume tp_block_nr = 8 blocks. * 2) At time 't0', user opens Rx ring. * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7 * 4) user-space is either sleeping or processing block '0'. * 5) tpacket_rcv is currently filling block '7', since there is no space left, * it will close block-7,loop around and try to fill block '0'. * call-flow: * __packet_lookup_frame_in_block * prb_retire_current_block() * prb_dispatch_next_block() * |->(BLOCK_STATUS == USER) evaluates to true * 5.1) Since block-0 is currently in-use, we just freeze the queue. 
* 6) Now there are two cases: * 6.1) Link goes idle right after the queue is frozen. * But remember, the last open_block() refreshed the timer. * When this timer expires,it will refresh itself so that we can * re-open block-0 in near future. * 6.2) Link is busy and keeps on receiving packets. This is a simple * case and __packet_lookup_frame_in_block will check if block-0 * is free and can now be re-used. */ static void prb_freeze_queue(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { pkc->reset_pending_on_curr_blk = 1; po->stats.stats3.tp_freeze_q_cnt++; } #define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT)) /* * If the next block is free then we will dispatch it * and return a good offset. * Else, we will freeze the queue. * So, caller must check the return value. */ static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po) { struct tpacket_block_desc *pbd; smp_rmb(); /* 1. Get current block num */ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* 2. If this block is currently in_use then freeze the queue */ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) { prb_freeze_queue(pkc, po); return NULL; } /* * 3. * open this block and return the offset where the first packet * needs to get stored. */ prb_open_block(pkc, pbd); return (void *)pkc->nxt_offset; } static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, struct packet_sock *po, unsigned int status) { struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc); /* retire/close the current block */ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) { /* * Plug the case where copy_bits() is in progress on * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't * have space to copy the pkt in the current block and * called prb_retire_current_block() * * We don't need to worry about the TMO case because * the timer-handler already handled this case. 
 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

/* Non-zero while the current block is still owned by user space. */
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

/* Non-zero while dispatch is frozen waiting for user space to catch up. */
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

/* Drop the fill-in-progress reference taken in prb_fill_curr_block(). */
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

/* Copy VLAN tag/TPID from the pending skb into the V3 packet header. */
static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

/* Run all requested "feature" fill operations for this V3 ring. */
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

/* Reserve 'len' (aligned) bytes at 'curr' in the current block and
 * update the block accounting; takes the blk_fill_in_prog reference.
 */
static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the
sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available.user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

/* Version dispatch: V1/V2 look up a frame, V3 a slot in the current block. */
static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;

	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

/* Return block 'idx' only if its status matches 'status', else NULL. */
static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

/* Index of the block preceding the active one (wraps around). */
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;

	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);

	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

/* Advance the rx head for V1/V2; V3 advances at block granularity elsewhere. */
static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ?
rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

/* Sum the per-cpu pending counters of a TX ring (always 0 for RX rings). */
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

/* V1/V2: is a KERNEL-owned frame still free a (1 >> pow_off) fraction
 * of the ring ahead of the current head?
 */
static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

/* V3 variant of the above, at block granularity. */
static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

/* Classify available receive room as ROOM_NONE/LOW/NORMAL.
 * Caller holds sk_receive_queue.lock (see packet_rcv_has_room()).
 */
static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

/* Locked wrapper; also records "pressure" (ring nearly full) on the socket. */
static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

/* Heuristic: does this skb's rxhash dominate the recent rollover history? */
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return
prandom_u32_max(num);
}

/* Pick a member socket with room, starting from the last successful one;
 * falls back to 'idx' when every member is full.
 */
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

/* Let the attached (c/e)BPF program choose the member index; 0 if none. */
static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

/* Demultiplex one skb to a member socket of the fanout group. */
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

/* Append 'sk' to the group array; smp_wmb pairs with lock-free readers
 * that read num_members before indexing arr[].
 */
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

/* Swap in a new BPF filter under f->lock; destroy the old one only after
 * an RCU grace period so concurrent readers stay safe.
 */
static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

/* Attach a classic-BPF fanout filter supplied as a struct sock_fprog. */
static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

/* Attach an eBPF fanout filter supplied as a program fd. */
static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	};
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	};
}

/* Join (creating if needed) fanout group 'id'; setsockopt(PACKET_FANOUT). */
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		/* fall through */
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
		if (!po->rollover)
			return -ENOMEM;
		atomic_long_set(&po->rollover->num, 0);
		atomic_long_set(&po->rollover->num_huge, 0);
		atomic_long_set(&po->rollover->num_failed, 0);
	}

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			/* Replace the socket's own prot hook with the
			 * shared fanout hook.
			 */
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	if (err) {
		kfree(po->rollover);
		po->rollover = NULL;
	}
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	/* Last member tears the whole group down. */
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		fanout_release_data(f);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);

	if (po->rollover)
		kfree_rcu(po->rollover, rcu);
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN
pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

/* SOCK_PACKET receive path: queue every frame seen on the device. */
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 * Output a raw packet to a device layer.
This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		/* Must drop RCU before a sleeping allocation; re-validate
		 * the device on the 'retry' pass.
		 */
		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockc.tsflags = sk->sk_tsflags;
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

/* Run the socket's attached BPF filter, if any; returns snap length. */
static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

/* Build a virtio_net_hdr describing the skb's offload state. */
static int __packet_rcv_vnet(const struct sk_buff *skb,
			     struct virtio_net_hdr *vnet_hdr)
{
	*vnet_hdr = (const struct virtio_net_hdr) { 0 };

	if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
		BUG();

	return 0;
}

/* Copy a virtio_net_hdr for the skb to user space, shrinking *len. */
static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (__packet_rcv_vnet(skb, &vnet_hdr))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}

/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb!
skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequencially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;	/* saved to undo our mangling on exit */
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	is_drop_n_account = true;
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;
}

/* Memory-mapped ring receive path (PACKET_RX_RING, all TPACKET versions). */
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timespec ts;
	__u32 ts_status;
	bool is_drop_n_account = false;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until current aligned size without forcing
	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 (skb->ip_summed == CHECKSUM_COMPLETE ||
		  skb_csum_unnecessary(skb)))
		status |= TP_STATUS_CSUM_VALID;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		/* NOTE(review): netoff/macoff are unsigned short while
		 * tp_reserve is a full unsigned int; a large tp_reserve
		 * could truncate/wrap these offsets — verify the bounds
		 * enforced at setsockopt(PACKET_RESERVE) time.
		 */
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		if (po->has_vnet_hdr)
			netoff += sizeof(struct virtio_net_hdr);
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			/* Frame doesn't fit: optionally keep a full copy
			 * queued for recvmsg(), and clamp the snap length.
			 */
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		u32 nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto drop_n_account;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
	/*
	 * LOSING will be reported till you read the stats,
	 * because it's COR - Clear On Read.
	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
	 * at packet level.
	 */
		if (po->stats.stats1.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	if (po->has_vnet_hdr) {
		/* NOTE(review): the vnet header is written at
		 * h.raw + macoff - sizeof(struct virtio_net_hdr); confirm
		 * macoff can never be smaller than that header here,
		 * otherwise this writes before the frame.
		 */
		if (__packet_rcv_vnet(skb, h.raw + macoff -
					   sizeof(struct virtio_net_hdr))) {
			spin_lock(&sk->sk_receive_queue.lock);
			goto drop_n_account;
		}
	}

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		getnstimeofday(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset,vlan are already populated above.
		 * So DONT clear those fields here
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		/* Hand the frame to user space and wake readers. */
		__packet_set_status(po, h.raw, status);
		sk->sk_data_ready(sk);
	} else {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;

drop_n_account:
	is_drop_n_account = true;
	po->stats.stats1.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

/* TX-ring skb destructor: return the frame slot to user space. */
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_shinfo(skb)->destructor_arg;
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
	}

	sock_wfree(skb);
}

static void tpacket_set_protocol(const struct net_device *dev,
				 struct sk_buff *skb)
{
	if (dev->type == ARPHRD_ETHER) {
		skb_reset_mac_header(skb);
		skb->protocol = eth_hdr(skb)->h_proto;
	}
}

static int
__packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{
	unsigned short gso_type = 0;

	/* Clamp hdr_len so csum_start + csum_offset + 2 fits inside it. */
	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);

	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
		return -EINVAL;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	vnet_hdr->gso_type = gso_type;	/* changes type, temporary storage */
	return 0;
}

/* Pull a virtio_net_hdr off the front of the message and validate it. */
static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr)
{
	int n;

	if (*len < sizeof(*vnet_hdr))
		return -EINVAL;
	*len -= sizeof(*vnet_hdr);

	n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
	if (n != sizeof(*vnet_hdr))
		return -EFAULT;

	return __packet_snd_vnet_parse(vnet_hdr, *len);
}

/* Apply a (pre-parsed) virtio_net_hdr's offload requests to the skb. */
static int packet_snd_vnet_gso(struct sk_buff *skb,
			       struct virtio_net_hdr *vnet_hdr)
{
	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
		u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);

		if (!skb_partial_csum_set(skb, s, o))
			return -EINVAL;
	}

	skb_shinfo(skb)->gso_size =
		__virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
	skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;
	return 0;
}

/* Build an skb from a TX-ring frame: the first 'copylen' bytes are copied
 * (link-layer header), the rest is attached as page fragments pointing
 * straight into the ring. Returns tp_len or a negative errno.
 */
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{
	union tpacket_uhdr ph;
	int to_write, offset, len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
	skb_shinfo(skb)->destructor_arg = ph.raw;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (copylen) {
		int hdrlen = min_t(int, copylen, tp_len);

		skb_push(skb, dev->hard_header_len);
		skb_put(skb, copylen - dev->hard_header_len);
		err = skb_store_bits(skb, 0, data, hdrlen);
		if (unlikely(err))
			return err;
		if (!dev_validate_header(dev, skb->data, hdrlen))
			return -EINVAL;
		if (!skb->protocol)
			tpacket_set_protocol(dev, skb);

		data += hdrlen;
		to_write -= hdrlen;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	skb_probe_transport_header(skb, 0);

	return tp_len;
}

/* Validate a TX-ring frame header and locate its payload (*data).
 * Returns the payload length or a negative errno.
 */
static int tpacket_parse_header(struct packet_sock *po, void *frame,
				int size_max, void **data)
{
	union tpacket_uhdr ph;
	int tp_len, off;

	ph.raw = frame;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	if (unlikely(po->tp_tx_has_off)) {
		int off_min, off_max;

		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (po->sk.sk_type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
	} else {
		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}

	*data = frame + off;
	return tp_len;
}

/* TX-ring send loop: drain AVAILABLE frames from the ring onto the wire. */
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct virtio_net_hdr *vnet_hdr = NULL;
	struct sockcm_cookie sockc;
	__be16 proto;
	int err, reserve = 0;
	void *ph;
	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
	int tp_len, size_max;
	unsigned char *addr;
	void *data;
	int len_sum = 0;
	int status = TP_STATUS_AVAILABLE;
	int hlen, tlen, copylen = 0;

	mutex_lock(&po->pg_vec_lock);

	if (likely(saddr == NULL)) {
		dev = packet_cached_dev_get(po);
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
	}

	sockc.tsflags = po->sk.sk_tsflags;
	if
(msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) goto out; } err = -ENXIO; if (unlikely(dev == NULL)) goto out; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_put; if (po->sk.sk_socket->type == SOCK_RAW) reserve = dev->hard_header_len; size_max = po->tx_ring.frame_size - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) size_max = dev->mtu + reserve + VLAN_HLEN; do { ph = packet_current_frame(po, &po->tx_ring, TP_STATUS_SEND_REQUEST); if (unlikely(ph == NULL)) { if (need_wait && need_resched()) schedule(); continue; } skb = NULL; tp_len = tpacket_parse_header(po, ph, size_max, &data); if (tp_len < 0) goto tpacket_error; status = TP_STATUS_SEND_REQUEST; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; if (po->has_vnet_hdr) { vnet_hdr = data; data += sizeof(*vnet_hdr); tp_len -= sizeof(*vnet_hdr); if (tp_len < 0 || __packet_snd_vnet_parse(vnet_hdr, tp_len)) { tp_len = -EINVAL; goto tpacket_error; } copylen = __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len); } copylen = max_t(int, copylen, dev->hard_header_len); skb = sock_alloc_send_skb(&po->sk, hlen + tlen + sizeof(struct sockaddr_ll) + (copylen - dev->hard_header_len), !need_wait, &err); if (unlikely(skb == NULL)) { /* we assume the socket was initially writeable ... 
*/ if (likely(len_sum > 0)) err = len_sum; goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && !packet_extra_vlan_len_allowed(dev, skb)) tp_len = -EMSGSIZE; if (unlikely(tp_len < 0)) { tpacket_error: if (po->tp_loss) { __packet_set_status(po, ph, TP_STATUS_AVAILABLE); packet_increment_head(&po->tx_ring); kfree_skb(skb); continue; } else { status = TP_STATUS_WRONG_FORMAT; err = tp_len; goto out_status; } } if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) { tp_len = -EINVAL; goto tpacket_error; } packet_pick_tx_queue(dev, skb); skb->destructor = tpacket_destruct_skb; __packet_set_status(po, ph, TP_STATUS_SENDING); packet_inc_pending(&po->tx_ring); status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); if (unlikely(err > 0)) { err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ skb = NULL; goto out_status; } /* * skb was dropped but not destructed yet; * let's treat it like congestion or err < 0 */ err = 0; } packet_increment_head(&po->tx_ring); len_sum += tp_len; } while (likely((ph != NULL) || /* Note: packet_read_pending() might be slow if we have * to call it as it's per_cpu variable, but in fast-path * we already short-circuit the loop with the first * condition, and luckily don't have to go that path * anyway. */ (need_wait && packet_read_pending(&po->tx_ring)))); err = len_sum; goto out_put; out_status: __packet_set_status(po, ph, status); kfree_skb(skb); out_put: dev_put(dev); out: mutex_unlock(&po->pg_vec_lock); return err; } static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, size_t reserve, size_t len, size_t linear, int noblock, int *err) { struct sk_buff *skb; /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, err, 0); if (!skb) return NULL; skb_reserve(skb, reserve); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); struct sk_buff *skb; struct net_device *dev; __be16 proto; unsigned char *addr; int err, reserve = 0; struct sockcm_cookie sockc; struct virtio_net_hdr vnet_hdr = { 0 }; int offset = 0; struct packet_sock *po = pkt_sk(sk); int hlen, tlen; int extra_len = 0; /* * Get and verify the address. */ if (likely(saddr == NULL)) { dev = packet_cached_dev_get(po); proto = po->num; addr = NULL; } else { err = -EINVAL; if (msg->msg_namelen < sizeof(struct sockaddr_ll)) goto out; if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) goto out; proto = saddr->sll_protocol; addr = saddr->sll_addr; dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); } err = -ENXIO; if (unlikely(dev == NULL)) goto out_unlock; err = -ENETDOWN; if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) goto out_unlock; } if (sock->type == SOCK_RAW) reserve = dev->hard_header_len; if (po->has_vnet_hdr) { err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); if (err) goto out_unlock; } if (unlikely(sock_flag(sk, SOCK_NOFCS))) { if (!netif_supports_nofcs(dev)) { err = -EPROTONOSUPPORT; goto out_unlock; } extra_len = 4; /* We're doing our own CRC */ } err = -EMSGSIZE; if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) goto out_unlock; err = -ENOBUFS; hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 
__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; skb_set_network_header(skb, reserve); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len); if (err) goto out_free; if (sock->type == SOCK_RAW && !dev_validate_header(dev, skb->data, len)) { err = -EINVAL; goto out_free; } sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { err = -EMSGSIZE; goto out_free; } skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sockc.mark; packet_pick_tx_queue(dev, skb); if (po->has_vnet_hdr) { err = packet_snd_vnet_gso(skb, &vnet_hdr); if (err) goto out_free; len += sizeof(vnet_hdr); } skb_probe_transport_header(skb, reserve); if (unlikely(extra_len == 4)) skb->no_fcs = 1; err = po->xmit(skb); if (err > 0 && (err = net_xmit_errno(err)) != 0) goto out_unlock; dev_put(dev); return len; out_free: kfree_skb(skb); out_unlock: if (dev) dev_put(dev); out: return err; } static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); if (po->tx_ring.pg_vec) return tpacket_snd(po, msg); else return packet_snd(sock, msg, len); } /* * Close a PACKET socket. This is fairly simple. We immediately go * to 'closed' state and remove our protocol entry in the device list. 
*/

/*
 * Release a PACKET socket: remove it from the per-netns socket list,
 * detach the protocol hook, tear down any mapped RX/TX rings, leave the
 * fanout group and drop the final reference.  Called on close().
 */
static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	/* Unlink from the per-netns list and update the /proc inuse count. */
	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	preempt_enable();

	/* Detach the protocol hook under bind_lock so no new packets are
	 * delivered to this socket, and drop the device reference it held.
	 */
	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	packet_cached_dev_reset(po);

	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	/* A zeroed request with closing == 1 tears down the ring. */
	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}

	fanout_release(sk);

	synchronize_net();
	/*
	 * Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	packet_free_pending(po);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 * Attach a packet hook.
*/

/*
 * packet_do_bind - (re)bind a packet socket to a device and protocol.
 * @sk:      socket to bind
 * @name:    device name to bind to (takes precedence over @ifindex), or NULL
 * @ifindex: interface index to bind to when @name is NULL; 0 = any device
 * @proto:   link-level protocol to hook (network byte order); 0 = leave
 *           the hook unregistered
 *
 * Returns 0 on success or a negative errno.  The hook is only
 * re-registered when the (proto, dev) pair actually changed.
 */
static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
			  __be16 proto)
{
	struct packet_sock *po = pkt_sk(sk);
	struct net_device *dev_curr;
	__be16 proto_curr;
	bool need_rehook;
	struct net_device *dev = NULL;
	int ret = 0;
	bool unlisted = false;

	lock_sock(sk);
	spin_lock(&po->bind_lock);
	rcu_read_lock();

	/* Fanout membership must be tested with bind_lock held: testing it
	 * before taking the lock races with fanout_add() and can leave a
	 * fanout group pointing at a rebound socket (CVE-2017-15649).
	 */
	if (po->fanout) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (name) {
		dev = dev_get_by_name_rcu(sock_net(sk), name);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	} else if (ifindex) {
		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out_unlock;
		}
	}

	if (dev)
		dev_hold(dev);

	proto_curr = po->prot_hook.type;
	dev_curr = po->prot_hook.dev;

	need_rehook = proto_curr != proto || dev_curr != dev;

	if (need_rehook) {
		if (po->running) {
			rcu_read_unlock();
			__unregister_prot_hook(sk, true);
			rcu_read_lock();
			dev_curr = po->prot_hook.dev;
			/* The device may have been unregistered while the
			 * hook was down; confirm it is still listed.
			 */
			if (dev)
				unlisted = !dev_get_by_index_rcu(sock_net(sk),
								 dev->ifindex);
		}

		po->num = proto;
		po->prot_hook.type = proto;

		if (unlikely(unlisted)) {
			dev_put(dev);
			po->prot_hook.dev = NULL;
			po->ifindex = -1;
			packet_cached_dev_reset(po);
		} else {
			po->prot_hook.dev = dev;
			po->ifindex = dev ?
dev->ifindex : 0; packet_cached_dev_assign(po, dev); } } if (dev_curr) dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; if (!unlisted && (!dev || (dev->flags & IFF_UP))) { register_prot_hook(sk); } else { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } out_unlock: rcu_read_unlock(); spin_unlock(&po->bind_lock); release_sock(sk); return ret; } /* * Bind a packet socket to a device */ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; char name[15]; /* * Check legality */ if (addr_len != sizeof(struct sockaddr)) return -EINVAL; strlcpy(name, uaddr->sa_data, sizeof(name)); return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; struct sock *sk = sock->sk; /* * Check legality */ if (addr_len < sizeof(struct sockaddr_ll)) return -EINVAL; if (sll->sll_family != AF_PACKET) return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol ? : pkt_sk(sk)->num); } static struct proto packet_proto = { .name = "PACKET", .owner = THIS_MODULE, .obj_size = sizeof(struct packet_sock), }; /* * Create a packet of type SOCK_PACKET. 
*/ static int packet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && sock->type != SOCK_PACKET) return -ESOCKTNOSUPPORT; sock->state = SS_UNCONNECTED; err = -ENOBUFS; sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); if (sk == NULL) goto out; sock->ops = &packet_ops; if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; sock_init_data(sock, sk); po = pkt_sk(sk); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; err = packet_alloc_pending(po); if (err) goto out2; packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; sk_refcnt_debug_inc(sk); /* * Attach a protocol block */ spin_lock_init(&po->bind_lock); mutex_init(&po->pg_vec_lock); po->rollover = NULL; po->prot_hook.func = packet_rcv; if (sock->type == SOCK_PACKET) po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; if (proto) { po->prot_hook.type = proto; register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); sk_add_node_rcu(sk, &net->packet.sklist); mutex_unlock(&net->packet.sklist_lock); preempt_disable(); sock_prot_inuse_add(net, &packet_proto, 1); preempt_enable(); return 0; out2: sk_free(sk); out: return err; } /* * Pull a packet from our receive queue and hand it to the user. * If necessary we block. */ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; int vnet_hdr_len = 0; unsigned int origlen = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? 
*/ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = sock_recv_errqueue(sk, msg, len, SOL_PACKET, PACKET_TX_TIMESTAMP); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->pressure) packet_rcv_has_room(pkt_sk(sk), NULL); if (pkt_sk(sk)->has_vnet_hdr) { err = packet_rcv_vnet(msg, skb, &len); if (err) goto out_free; vnet_hdr_len = sizeof(struct virtio_net_hdr); } /* You lose any data beyond the buffer you gave. If it worries * a user program they can ask the device for its MTU * anyway. */ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; if (sock->type != SOCK_PACKET) { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; } sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { /* If the address length field is there to be filled * in, we fill it in now. 
*/ if (sock->type == SOCK_PACKET) { __sockaddr_check_size(sizeof(struct sockaddr_pkt)); msg->msg_namelen = sizeof(struct sockaddr_pkt); } else { struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); } if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && (skb->ip_summed == CHECKSUM_COMPLETE || skb_csum_unnecessary(skb))) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (skb_vlan_tag_present(skb)) { aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? 
skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; if (peer) return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); return 0; } static int packet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct net_device *dev; struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); if (peer) return -EOPNOTSUPP; sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len); } else { sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */ sll->sll_halen = 0; } rcu_read_unlock(); *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; return 0; } static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) { switch (i->type) { case PACKET_MR_MULTICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_mc_add(dev, i->addr); else return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); case PACKET_MR_ALLMULTI: return dev_set_allmulti(dev, what); case PACKET_MR_UNICAST: if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) return dev_uc_add(dev, i->addr); else return dev_uc_del(dev, i->addr); break; default: break; } return 0; } static void packet_dev_mclist_delete(struct net_device *dev, struct packet_mclist **mlp) { 
struct packet_mclist *ml; while ((ml = *mlp) != NULL) { if (ml->ifindex == dev->ifindex) { packet_dev_mc(dev, ml, -1); *mlp = ml->next; kfree(ml); } else mlp = &ml->next; } } static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml, *i; struct net_device *dev; int err; rtnl_lock(); err = -ENODEV; dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); if (!dev) goto done; err = -EINVAL; if (mreq->mr_alen > dev->addr_len) goto done; err = -ENOBUFS; i = kmalloc(sizeof(*i), GFP_KERNEL); if (i == NULL) goto done; err = 0; for (ml = po->mclist; ml; ml = ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { ml->count++; /* Free the new element ... */ kfree(i); goto done; } } i->type = mreq->mr_type; i->ifindex = mreq->mr_ifindex; i->alen = mreq->mr_alen; memcpy(i->addr, mreq->mr_address, i->alen); memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); i->count = 1; i->next = po->mclist; po->mclist = i; err = packet_dev_mc(dev, i, 1); if (err) { po->mclist = i->next; kfree(i); } done: rtnl_unlock(); return err; } static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) { struct packet_mclist *ml, **mlp; rtnl_lock(); for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { if (ml->ifindex == mreq->mr_ifindex && ml->type == mreq->mr_type && ml->alen == mreq->mr_alen && memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev) packet_dev_mc(dev, ml, -1); kfree(ml); } break; } } rtnl_unlock(); return 0; } static void packet_flush_mclist(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_mclist *ml; if (!po->mclist) return; rtnl_lock(); while ((ml = po->mclist) != NULL) { struct net_device *dev; po->mclist = ml->next; 
dev = __dev_get_by_index(sock_net(sk), ml->ifindex); if (dev != NULL) packet_dev_mc(dev, ml, -1); kfree(ml); } rtnl_unlock(); } static int packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); int ret; if (level != SOL_PACKET) return -ENOPROTOOPT; switch (optname) { case PACKET_ADD_MEMBERSHIP: case PACKET_DROP_MEMBERSHIP: { struct packet_mreq_max mreq; int len = optlen; memset(&mreq, 0, sizeof(mreq)); if (len < sizeof(struct packet_mreq)) return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); if (copy_from_user(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; if (optname == PACKET_ADD_MEMBERSHIP) ret = packet_mc_add(sk, &mreq); else ret = packet_mc_drop(sk, &mreq); return ret; } case PACKET_RX_RING: case PACKET_TX_RING: { union tpacket_req_u req_u; int len; switch (po->tp_version) { case TPACKET_V1: case TPACKET_V2: len = sizeof(req_u.req); break; case TPACKET_V3: default: len = sizeof(req_u.req3); break; } if (optlen < len) return -EINVAL; if (copy_from_user(&req_u.req, optval, len)) return -EFAULT; return packet_set_ring(sk, &req_u, 0, optname == PACKET_TX_RING); } case PACKET_COPY_THRESH: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; pkt_sk(sk)->copy_thresh = val; return 0; } case PACKET_VERSION: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: break; default: return -EINVAL; } lock_sock(sk); if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { ret = -EBUSY; } else { po->tp_version = val; ret = 0; } release_sock(sk); return ret; } case PACKET_RESERVE: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; 
if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_reserve = val; return 0; } case PACKET_LOSS: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_loss = !!val; return 0; } case PACKET_AUXDATA: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->auxdata = !!val; return 0; } case PACKET_ORIGDEV: { int val; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->origdev = !!val; return 0; } case PACKET_VNET_HDR: { int val; if (sock->type != SOCK_RAW) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->has_vnet_hdr = !!val; return 0; } case PACKET_TIMESTAMP: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; return 0; } case PACKET_FANOUT: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); } case PACKET_FANOUT_DATA: { if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); } case PACKET_TX_HAS_OFF: { unsigned int val; if (optlen != sizeof(val)) return -EINVAL; if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->tp_tx_has_off = !!val; return 0; } case PACKET_QDISC_BYPASS: { int val; if (optlen != sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; return 0; } default: return -ENOPROTOOPT; } } static int packet_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { int len; int val, lv = sizeof(val); struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); void *data = &val; union tpacket_stats_u st; struct tpacket_rollover_stats rstats; if (level != SOL_PACKET) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case PACKET_STATISTICS: spin_lock_bh(&sk->sk_receive_queue.lock); memcpy(&st, &po->stats, sizeof(st)); memset(&po->stats, 0, sizeof(po->stats)); spin_unlock_bh(&sk->sk_receive_queue.lock); if (po->tp_version == TPACKET_V3) { lv = sizeof(struct tpacket_stats_v3); st.stats3.tp_packets += st.stats3.tp_drops; data = &st.stats3; } else { lv = sizeof(struct tpacket_stats); st.stats1.tp_packets += st.stats1.tp_drops; data = &st.stats1; } break; case PACKET_AUXDATA: val = po->auxdata; break; case PACKET_ORIGDEV: val = po->origdev; break; case PACKET_VNET_HDR: val = po->has_vnet_hdr; break; case PACKET_VERSION: val = po->tp_version; break; case PACKET_HDRLEN: if (len > sizeof(int)) len = sizeof(int); if (copy_from_user(&val, optval, len)) return -EFAULT; switch (val) { case TPACKET_V1: val = sizeof(struct tpacket_hdr); break; case TPACKET_V2: val = sizeof(struct tpacket2_hdr); break; case TPACKET_V3: val = sizeof(struct tpacket3_hdr); break; default: return -EINVAL; } break; case PACKET_RESERVE: val = po->tp_reserve; break; case PACKET_LOSS: val = po->tp_loss; break; case PACKET_TIMESTAMP: val = po->tp_tstamp; break; case PACKET_FANOUT: val = (po->fanout ? 
((u32)po->fanout->id | ((u32)po->fanout->type << 16) | ((u32)po->fanout->flags << 24)) : 0); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) return -EINVAL; rstats.tp_all = atomic_long_read(&po->rollover->num); rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); data = &rstats; lv = sizeof(rstats); break; case PACKET_TX_HAS_OFF: val = po->tp_tx_has_off; break; case PACKET_QDISC_BYPASS: val = packet_use_direct_xmit(po); break; default: return -ENOPROTOOPT; } if (len > lv) len = lv; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, data, len)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static int compat_packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct packet_sock *po = pkt_sk(sock->sk); if (level != SOL_PACKET) return -ENOPROTOOPT; if (optname == PACKET_FANOUT_DATA && po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { optval = (char __user *)get_compat_bpf_fprog(optval); if (!optval) return -EFAULT; optlen = sizeof(struct sock_fprog); } return packet_setsockopt(sock, level, optname, optval, optlen); } #endif static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { struct sock *sk; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); sk_for_each_rcu(sk, &net->packet.sklist) { struct packet_sock *po = pkt_sk(sk); switch (msg) { case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); /* fallthrough */ case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->running) { __unregister_prot_hook(sk, false); sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); fanout_release(sk); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); po->prot_hook.dev = NULL; } 
spin_unlock(&po->bind_lock); } break; case NETDEV_UP: if (dev->ifindex == po->ifindex) { spin_lock(&po->bind_lock); if (po->num) register_prot_hook(sk); spin_unlock(&po->bind_lock); } break; } } rcu_read_unlock(); return NOTIFY_DONE; } static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) amount = skb->len; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); #ifdef CONFIG_INET case SIOCADDRT: case SIOCDELRT: case SIOCDARP: case SIOCGARP: case SIOCSARP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: return inet_dgram_ops.ioctl(sock, cmd, arg); #endif default: return -ENOIOCTLCMD; } return 0; } static unsigned int packet_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); unsigned int mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) mask |= POLLIN | POLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) mask |= POLLOUT | POLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; } /* Dirty? 
   Well, I still did not learn better way to account
   for user mmaps.
 */

/* Bump the ring mapping count when a VMA over it is duplicated (e.g. fork). */
static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

/* Drop the ring mapping count when a VMA over it goes away. */
static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};

/* Free a block vector; entries may come from the page allocator or vmalloc. */
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

/*
 * Allocate one ring block: try the page allocator without retries first,
 * fall back to vmalloc, then retry the page allocator allowing reclaim.
 */
static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer)
		return buffer;

	/* vmalloc failed, lets dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

/* Allocate the whole block vector; frees everything on partial failure. */
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

/*
 * Set up (or, with tp_block_nr == 0, tear down) a memory-mapped TX or RX
 * ring.  Validates user-supplied geometry, temporarily unregisters the
 * protocol hook, swaps the new page vector in under pg_vec_lock, and frees
 * the displaced vector.
 */
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	lock_sock(sk);
	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
		net_warn_ratelimited("Tx-ring is not supported.\n");
		goto out;
	}

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		/* V3 blocks must leave room past the per-block private area. */
		if (po->tp_version >= TPACKET_V3 &&
		    (int)(req->tp_block_size -
			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Transmit path is not supported. We checked
			 * it above but just being paranoid
			 */
			if (!tx_ring)
				init_prb_bdqc(po, rb, pg_vec, req_u);
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (closing && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

	/* pg_vec now holds the displaced (old or unused new) vector. */
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	release_sock(sk);
	return err;
}

/*
 * mmap() the RX and/or TX rings into a single contiguous user VMA.  The
 * VMA must start at offset 0 and exactly match the total ring size.
 */
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if
	    (size != expected_size)
		goto out;

	start = vma->vm_start;
	/* Insert every ring page, RX ring first, then TX ring. */
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

/* Legacy SOCK_PACKET operations: no mmap, no socket options. */
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

/* AF_PACKET (SOCK_RAW/SOCK_DGRAM) operations. */
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_packet_setsockopt,
#endif
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS
/* /proc/net/packet iteration runs under RCU (released in ..._stop). */
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

/* One line per packet socket: pointer, refcount, type, proto, ifindex, ... */
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

/* Per-netns setup: socket list plus the /proc/net/packet entry. */
static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

/* Module teardown: reverse order of packet_init(). */
static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);
./CrossVul/dataset_final_sorted/CWE-416/c/good_5354_0
crossvul-cpp_data_bad_5332_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o bounds: Method GetImageBoundingBox returns the bounding box of an
%      image canvas.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    bounds.x/bounds.y track the smallest non-border coordinates found so
    far; bounds.width/bounds.height track the largest.  They are converted
    to extents at the very end.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  /* target[0..2] are corner pixels used as the "border" colors to trim. */
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Start from a snapshot of the shared bounds; merged back below. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /* Merge this row's result into the shared bounds. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#   pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) && (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* Convert maximum coordinates into width/height extents. */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e C h a n n e l D e p t h                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].red,range) == 
MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (QuantumRange <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). */ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; if ((channel & RedChannel) != 0) { pixel=GetPixelRed(p); 
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & GreenChannel) != 0) { pixel=GetPixelGreen(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & BlueChannel) != 0) { pixel=GetPixelBlue(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { pixel=GetPixelOpacity(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel=GetPixelIndex(indexes+x); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; 
QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse) atDepth=MagickTrue; if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. 
%
*/
/* Round image->depth up to a legal quantum depth (8/16/32/64), optionally
   clamped to the build's MAGICKCORE_QUANTUM_DEPTH. */
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,GetImageType(image));
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  /* Uses the cheap cached-flag predicates (Is*Image), not a pixel scan. */
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns a grayscale type if all the pixels in the
%  image have the same red, green, and blue intensities, and bi-level when
%  each intensity is either 0 or QuantumRange. Otherwise undefined is
%  returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Trust an already-classified type; only scan pixels when unknown. */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      /* Any gray pixel that is not pure black/white demotes to grayscale. */
      if ((type == BilevelType) &&
          (IsPixelMonochrome(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    type=GrayscaleMatteType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register ssize_t
    x;

  register const PixelPacket
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Trust an already-classified bilevel type; otherwise scan pixels. */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->matte == MagickFalse)
        return(ColorSeparationType);
      return(ColorSeparationMatteType);
    }
  /* Unlike GetImageType(), these Identify* helpers scan the pixels. */
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    {
      if (image->matte != MagickFalse)
        return(GrayscaleMatteType);
      return(GrayscaleType);
    }
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->matte != MagickFalse)
        return(PaletteMatteType);
      return(PaletteType);
    }
  if (image->matte != MagickFalse)
    return(TrueColorMatteType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s G r a y I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsGrayImage() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsGrayImage method is:
%
%      MagickBooleanType IsGrayImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IsGrayImage(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o n o c h r o m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMonochromeImage() returns MagickTrue if type of the image is bi-level. % % The format of the IsMonochromeImage method is: % % MagickBooleanType IsMonochromeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsMonochromeImage(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsOpaqueImage() returns MagickTrue if none of the pixels in the image have % an opacity value other than opaque (0). % % The format of the IsOpaqueImage method is: % % MagickBooleanType IsOpaqueImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsOpaqueImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const PixelPacket *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) break; p++; } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelDepth() sets the depth of the image. % % The format of the SetImageChannelDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth) % MagickBooleanType SetImageChannelDepth(Image *image, % const ChannelType channel,const size_t depth) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. 
%
*/

/* SetImageDepth(): convenience wrapper that applies the requested depth to
   all channels via SetImageChannelDepth(). */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}

/* SetImageChannelDepth(): quantize the selected channels of every pixel (and
   colormap entry, for PseudoClass images) so they carry at most `depth`
   significant bits, then record the new depth on the image.  Returns
   MagickTrue on success. */
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* Requested depth meets or exceeds the build-time quantum depth:
         no pixel data needs rescaling, just record the new depth. */
      image->depth=depth;
      return(MagickTrue);
    }
  /* Maximum value representable in `depth` bits; round-tripping through it
     drops the low-order bits of each sample. */
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /* PseudoClass: rescale the colormap entries first. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].opacity),range),range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).

        Non-HDRI build with a small quantum range: precompute a lookup
        table mapping every possible sample value to its reduced-depth
        value, then apply it row by row.
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;  /* another thread already failed; skip remaining rows */
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          /* opacity is only rescaled when the image actually has a matte */
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(
              GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.

    General path (HDRI builds, or quantum range too large for a lookup
    table): rescale each sample arithmetically.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
%        PaletteMatteType, TrueColorType, TrueColorMatteType,
%        ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
*/
/* SetImageType(): convert the pixel data (colorspace, colormap, matte) so
   the image actually conforms to the requested type, and record that type.
   Returns MagickFalse if any of the underlying transforms failed.  Note
   that within each case a later transform's status overwrites an earlier
   one — presumably intentional "last operation wins" semantics; verify
   before changing. */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* honor a per-image "dither" artifact when quantizing below */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      if (SetImageMonochrome(image,&image->exception) == MagickFalse)
        {
          /* not already monochrome: gray, normalize, then quantize to 2
             colors */
          status=TransformImageColorspace(image,GRAYColorspace);
          (void) NormalizeImage(image);
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=2;
          quantize_info->colorspace=GRAYColorspace;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->colors=2;
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      if (SetImageGray(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      if (SetImageGray(image,&image->exception) == MagickFalse)
        status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      /* only quantize when the image cannot already be expressed as a
         256-entry colormap */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      /* threshold the alpha channel to fully-opaque/fully-transparent */
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
        status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          /* route through sRGB first so the CMYK transform has a known
             source colorspace */
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            (void) TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      if (image->colorspace != CMYKColorspace)
        {
          if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
            (void) TransformImageColorspace(image,sRGBColorspace);
          status=TransformImageColorspace(image,CMYKColorspace);
        }
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-416/c/bad_5332_0
crossvul-cpp_data_good_1314_0
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

/*
 * Compute the metadata checksum of the on-disk inode.  The checksum fields
 * themselves (i_checksum_lo, and i_checksum_hi when the large inode carries
 * one) are substituted with zero (dummy_csum) while hashing so the stored
 * checksum does not feed back into itself.
 */
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	/* hash up to i_checksum_lo, a zero in its place, then the rest of
	 * the good-old (128-byte) inode */
	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		/* large inode: also cover the extended area, substituting
		 * zero for i_checksum_hi when it is present */
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

/*
 * Verify the stored inode checksum against a freshly computed one.  Returns
 * nonzero (valid) when checksumming is not in effect (non-Linux creator OS
 * or no metadata_csum feature), or when the checksums match.
 */
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;	/* only 16 bits are stored */

	return provided == calculated;
}

/*
 * Recompute and store the inode checksum (low 16 bits always; high 16 bits
 * only when the large inode has room for i_checksum_hi).  No-op when
 * checksumming is not in effect.
 */
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		/* exclude blocks consumed by an external xattr block when
		 * deciding whether the link body lives inline */
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) &&
			inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error(inode->i_sb,
				   "couldn't truncate inode %lu (err %d)",
				   inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		/* accounting bug: clamp to the reservation and warn */
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode);
}

/*
 * Reject mappings whose physical blocks fall outside the valid data-block
 * range of the filesystem; the journal inode is exempt.
 */
static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Zero out a range of blocks on disk; encrypted inodes go through fscrypt
 * so the zeroes are written as ciphertext.  Returns 0 on success.
 */
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window that the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock enables.  The reason
	 * is that we lookup a block mapping in extent status tree with
	 * out taking i_data_sem.  So at the time the unwritten extent
	 * could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because extent will be collapsed in status
	 * tree.  So the m_len might not equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocate blocks
 * and store the allocated blocks in the result buffer head and mark it
 * mapped.
 *
 * If file type is extents based, it will call ext4_ext_map_blocks(),
 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
 * based files
 *
 * On success, it returns the number of blocks being mapped or allocated.  if
 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
 * is marked as unwritten. If the create == 1, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated), in
 * that case, @map is returned as unmapped but we still do fill map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can handle the block number less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			/* cache hit: answer directly from the es entry,
			 * clamped to the caller's requested length */
			map->m_pblk = ext4_es_pblock(&es) +
				      map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			/* delayed/hole: report an unmapped range of the
			 * cached length, retval 0 */
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed,
				       map->m_lblk, map->m_lblk +
				       map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already allocated
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns the create = 0
	 * with buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating an new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New blocks allocate and/or writing to unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed,
				       map->m_lblk, map->m_lblk +
				       map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

/*
 * Map a single buffer head via ext4_map_blocks(); fills in b_size with the
 * mapped (or hole) length.  Inline-data inodes are rejected with -ERANGE.
 */
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ?
ERR_PTR(-ENOSPC) : NULL; if (err < 0) return ERR_PTR(err); bh = sb_getblk(inode->i_sb, map.m_pblk); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); if (map.m_flags & EXT4_MAP_NEW) { J_ASSERT(create != 0); J_ASSERT(handle != NULL); /* * Now that we do not always journal data, we should * keep in mind whether this should always journal the * new buffer as metadata. For now, regular file * writes use ext4_get_block instead, so it's not a * problem. */ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, bh); if (unlikely(err)) { unlock_buffer(bh); goto errout; } if (!buffer_uptodate(bh)) { memset(bh->b_data, 0, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (unlikely(err)) goto errout; } else BUFFER_TRACE(bh, "not a new buffer"); return bh; errout: brelse(bh); return ERR_PTR(err); } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct buffer_head *bh; bh = ext4_getblk(handle, inode, block, map_flags); if (IS_ERR(bh)) return bh; if (!bh || ext4_buffer_uptodate(bh)) return bh; ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); return ERR_PTR(-EIO); } /* Read a contiguous batch of blocks. */ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count, bool wait, struct buffer_head **bhs) { int i, err; for (i = 0; i < bh_count; i++) { bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */); if (IS_ERR(bhs[i])) { err = PTR_ERR(bhs[i]); bh_count = i; goto out_brelse; } } for (i = 0; i < bh_count; i++) /* Note that NULL bhs[i] is valid because of holes. 
*/ if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bhs[i]); if (!wait) return 0; for (i = 0; i < bh_count; i++) if (bhs[i]) wait_on_buffer(bhs[i]); for (i = 0; i < bh_count; i++) { if (bhs[i] && !buffer_uptodate(bhs[i])) { err = -EIO; goto out_brelse; } } return 0; out_brelse: for (i = 0; i < bh_count; i++) { brelse(bhs[i]); bhs[i] = NULL; } return err; } int ext4_walk_page_buffers(handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)(handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for (bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } /* * To preserve ordering, it is essential that the hole instantiation and * the data write be encapsulated in a single transaction. We cannot * close off a transaction and start a new one between the ext4_get_block() * and the commit_write(). So doing the jbd2_journal_start at the start of * prepare_write() is the right place. * * Also, this function can nest inside ext4_writepage(). In that case, we * *know* that ext4_writepage() has generated enough buffer credits to do the * whole page. So we won't block on the journal in that case, which is good, * because the caller may be PF_MEMALLOC. * * By accident, ext4 can be reentered when a transaction is open via * quota file writes. 
 * If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	/* Buffers that are not mapped, or were freed by a concurrent
	 * truncate, need no journal write access. */
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
*/ if (dirty) clear_buffer_dirty(bh); BUFFER_TRACE(bh, "get write access"); ret = ext4_journal_get_write_access(handle, bh); if (!ret && dirty) ret = ext4_handle_dirty_metadata(handle, NULL, bh); return ret; } #ifdef CONFIG_FS_ENCRYPTION static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; sector_t block; int err = 0; unsigned blocksize = inode->i_sb->s_blocksize; unsigned bbits; struct buffer_head *bh, *head, *wait[2]; int nr_wait = 0; int i; BUG_ON(!PageLocked(page)); BUG_ON(from > PAGE_SIZE); BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); bbits = ilog2(blocksize); block = (sector_t)page->index << (PAGE_SHIFT - bbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } continue; } if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; if (buffer_new(bh)) { if (PageUptodate(page)) { clear_buffer_new(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); continue; } if (block_end > to || block_start < from) zero_user_segments(page, to, block_end, block_start, from); continue; } } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); wait[nr_wait++] = bh; } } /* * If we issued read requests, let them complete. 
*/ for (i = 0; i < nr_wait; i++) { wait_on_buffer(wait[i]); if (!buffer_uptodate(wait[i])) err = -EIO; } if (unlikely(err)) { page_zero_new_buffers(page, from, to); } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) { for (i = 0; i < nr_wait; i++) { int err2; err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize, bh_offset(wait[i])); if (err2) { clear_buffer_uptodate(wait[i]); err = err2; } } } return err; } #endif static int ext4_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; int ret, needed_blocks; handle_t *handle; int retries = 0; struct page *page; pgoff_t index; unsigned from, to; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; trace_ext4_write_begin(inode, pos, len, flags); /* * Reserve one block more for addition to orphan list in case * we allocate blocks but write fails for some reason */ needed_blocks = ext4_writepage_trans_blocks(inode) + 1; index = pos >> PAGE_SHIFT; from = pos & (PAGE_SIZE - 1); to = from + len; if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, flags, pagep); if (ret < 0) return ret; if (ret == 1) return 0; } /* * grab_cache_page_write_begin() can take a long time if the * system is thrashing due to memory pressure, or if the page * is being written back. So grab it first before we start * the transaction handle. This also allows us to allocate * the page (if needed) without using GFP_NOFS. 
*/ retry_grab: page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; unlock_page(page); retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); if (IS_ERR(handle)) { put_page(page); return PTR_ERR(handle); } lock_page(page); if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); put_page(page); ext4_journal_stop(handle); goto retry_grab; } /* In case writeback began while the page was unlocked */ wait_for_stable_page(page); #ifdef CONFIG_FS_ENCRYPTION if (ext4_should_dioread_nolock(inode)) ret = ext4_block_write_begin(page, pos, len, ext4_get_block_unwritten); else ret = ext4_block_write_begin(page, pos, len, ext4_get_block); #else if (ext4_should_dioread_nolock(inode)) ret = __block_write_begin(page, pos, len, ext4_get_block_unwritten); else ret = __block_write_begin(page, pos, len, ext4_get_block); #endif if (!ret && ext4_should_journal_data(inode)) { ret = ext4_walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); } if (ret) { bool extended = (pos + len > inode->i_size) && !ext4_verity_in_progress(inode); unlock_page(page); /* * __block_write_begin may have instantiated a few blocks * outside i_size. Trim these off again. Don't need * i_size_read because we hold i_mutex. * * Add inode to orphan list in case we crash before * truncate finishes */ if (extended && ext4_can_truncate(inode)) ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (extended) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might * still be on the orphan list; we need to * make sure the inode is removed from the * orphan list in that case. 
*/ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; put_page(page); return ret; } *pagep = page; return ret; } /* For write_end() in data=journal mode */ static int write_end_fn(handle_t *handle, struct buffer_head *bh) { int ret; if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; set_buffer_uptodate(bh); ret = ext4_handle_dirty_metadata(handle, NULL, bh); clear_buffer_meta(bh); clear_buffer_prio(bh); return ret; } /* * We need to pick up the new inode size which generic_commit_write gave us * `file' can be NULL - eg, when called from page_symlink(). * * ext4 never places buffers on inode->i_mapping->private_list. metadata * buffers are managed internally. */ static int ext4_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; loff_t old_size = inode->i_size; int ret = 0, ret2; int i_size_changed = 0; int inline_data = ext4_has_inline_data(inode); bool verity = ext4_verity_in_progress(inode); trace_ext4_write_end(inode, pos, len, copied); if (inline_data) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) { unlock_page(page); put_page(page); goto errout; } copied = ret; } else copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); /* * it's important to update i_size while still holding page lock: * page writeout could otherwise come in and zero beyond i_size. * * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree * blocks are being written past EOF, so skip the i_size update. */ if (!verity) i_size_changed = ext4_update_inode_size(inode, pos + copied); unlock_page(page); put_page(page); if (old_size < pos && !verity) pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. 
First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed || inline_data) ext4_mark_inode_dirty(handle, inode); if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); errout: ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size && !verity) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } /* * This is a private version of page_zero_new_buffers() which doesn't * set the buffer to be dirty, since in data=journalled mode we need * to call ext4_handle_dirty_metadata() instead. 
*/ static void ext4_journalled_zero_new_buffers(handle_t *handle, struct page *page, unsigned from, unsigned to) { unsigned int block_start = 0, block_end; struct buffer_head *head, *bh; bh = head = page_buffers(page); do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!PageUptodate(page)) { unsigned start, size; start = max(from, block_start); size = min(to, block_end) - start; zero_user(page, start, size); write_end_fn(handle, bh); } clear_buffer_new(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } static int ext4_journalled_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; loff_t old_size = inode->i_size; int ret = 0, ret2; int partial = 0; unsigned from, to; int size_changed = 0; int inline_data = ext4_has_inline_data(inode); bool verity = ext4_verity_in_progress(inode); trace_ext4_journalled_write_end(inode, pos, len, copied); from = pos & (PAGE_SIZE - 1); to = from + len; BUG_ON(!ext4_handle_valid(handle)); if (inline_data) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) { unlock_page(page); put_page(page); goto errout; } copied = ret; } else if (unlikely(copied < len) && !PageUptodate(page)) { copied = 0; ext4_journalled_zero_new_buffers(handle, page, from, to); } else { if (unlikely(copied < len)) ext4_journalled_zero_new_buffers(handle, page, from + copied, to); ret = ext4_walk_page_buffers(handle, page_buffers(page), from, from + copied, &partial, write_end_fn); if (!partial) SetPageUptodate(page); } if (!verity) size_changed = ext4_update_inode_size(inode, pos + copied); ext4_set_inode_state(inode, EXT4_STATE_JDATA); EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; unlock_page(page); put_page(page); if (old_size < pos && !verity) 
pagecache_isize_extended(inode, old_size, pos); if (size_changed || inline_data) { ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) ret = ret2; } if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); errout: ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size && !verity) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } /* * Reserve space for a single cluster */ static int ext4_da_reserve_space(struct inode *inode) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); int ret; /* * We will charge metadata quota at writeout time; this saves * us from metadata over-estimation, though we may go over by * a small amount in the end. Here we just reserve for data. 
*/ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); if (ret) return ret; spin_lock(&ei->i_block_reservation_lock); if (ext4_claim_free_clusters(sbi, 1, 0)) { spin_unlock(&ei->i_block_reservation_lock); dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); return -ENOSPC; } ei->i_reserved_data_blocks++; trace_ext4_da_reserve_space(inode); spin_unlock(&ei->i_block_reservation_lock); return 0; /* success */ } void ext4_da_release_space(struct inode *inode, int to_free) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); if (!to_free) return; /* Nothing to release, exit */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); trace_ext4_da_release_space(inode, to_free); if (unlikely(to_free > ei->i_reserved_data_blocks)) { /* * if there aren't enough reserved blocks, then the * counter is messed up somewhere. Since this * function is called from invalidate page, it's * harmless to return without any action. */ ext4_warning(inode->i_sb, "ext4_da_release_space: " "ino %lu, to_free %d with only %d reserved " "data blocks", inode->i_ino, to_free, ei->i_reserved_data_blocks); WARN_ON(1); to_free = ei->i_reserved_data_blocks; } ei->i_reserved_data_blocks -= to_free; /* update fs dirty data blocks counter */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); } /* * Delayed allocation stuff */ struct mpage_da_data { struct inode *inode; struct writeback_control *wbc; pgoff_t first_page; /* The first page to write */ pgoff_t next_page; /* Current page to examine */ pgoff_t last_page; /* Last page to examine */ /* * Extent to map - this can be after first_page because that can be * fully mapped. We somewhat abuse m_flags to store whether the extent * is delalloc or unwritten. 
*/ struct ext4_map_blocks map; struct ext4_io_submit io_submit; /* IO submission data */ unsigned int do_map:1; }; static void mpage_release_unused_pages(struct mpage_da_data *mpd, bool invalidate) { int nr_pages, i; pgoff_t index, end; struct pagevec pvec; struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; /* This is necessary when next_page == 0. */ if (mpd->first_page >= mpd->next_page) return; index = mpd->first_page; end = mpd->next_page - 1; if (invalidate) { ext4_lblk_t start, last; start = index << (PAGE_SHIFT - inode->i_blkbits); last = end << (PAGE_SHIFT - inode->i_blkbits); ext4_es_remove_extent(inode, start, last - start + 1); } pagevec_init(&pvec); while (index <= end) { nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); if (invalidate) { if (page_mapped(page)) clear_page_dirty_for_io(page); block_invalidatepage(page, 0, PAGE_SIZE); ClearPageUptodate(page); } unlock_page(page); } pagevec_release(&pvec); } } static void ext4_print_free_blocks(struct inode *inode) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct super_block *sb = inode->i_sb; struct ext4_inode_info *ei = EXT4_I(inode); ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", EXT4_C2B(EXT4_SB(inode->i_sb), ext4_count_free_clusters(sb))); ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", (long long) EXT4_C2B(EXT4_SB(sb), percpu_counter_sum(&sbi->s_freeclusters_counter))); ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", (long long) EXT4_C2B(EXT4_SB(sb), percpu_counter_sum(&sbi->s_dirtyclusters_counter))); ext4_msg(sb, KERN_CRIT, "Block reservation details"); ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", ei->i_reserved_data_blocks); return; } static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) { return 
	       (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 *
 * NOTE(review): caller is expected to hold i_data_sem (this is called from
 * the ext4_da_map_blocks() add_delayed path below) — confirm against other
 * callers before relying on that.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one.  Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		/* Non-bigalloc: one cluster per block, always reserve. */
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)   /* ENOSPC */
			goto errout;
	} else {   /* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				/* Fall back to the extent tree: the es tree
				 * may have been shrunk under memory
				 * pressure. */
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					goto errout;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)   /* ENOSPC */
						goto errout;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
errout:
	return ret;
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time. This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
*/ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, struct ext4_map_blocks *map, struct buffer_head *bh) { struct extent_status es; int retval; sector_t invalid_block = ~((sector_t) 0xffff); #ifdef ES_AGGRESSIVE_TEST struct ext4_map_blocks orig_map; memcpy(&orig_map, map, sizeof(*map)); #endif if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) invalid_block = ~0; map->m_flags = 0; ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," "logical block %lu\n", inode->i_ino, map->m_len, (unsigned long) map->m_lblk); /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) { if (ext4_es_is_hole(&es)) { retval = 0; down_read(&EXT4_I(inode)->i_data_sem); goto add_delayed; } /* * Delayed extent could be allocated by fallocate. * So we need to check it. */ if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { map_bh(bh, inode->i_sb, invalid_block); set_buffer_new(bh); set_buffer_delay(bh); return 0; } map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; retval = es.es_len - (iblock - es.es_lblk); if (retval > map->m_len) retval = map->m_len; map->m_len = retval; if (ext4_es_is_written(&es)) map->m_flags |= EXT4_MAP_MAPPED; else if (ext4_es_is_unwritten(&es)) map->m_flags |= EXT4_MAP_UNWRITTEN; else BUG(); #ifdef ES_AGGRESSIVE_TEST ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); #endif return retval; } /* * Try to see if we can get the block without requesting a new * file system block. */ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_has_inline_data(inode)) retval = 0; else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) retval = ext4_ext_map_blocks(NULL, inode, map, 0); else retval = ext4_ind_map_blocks(NULL, inode, map, 0); add_delayed: if (retval == 0) { int ret; /* * XXX: __block_prepare_write() unmaps passed block, * is it OK? 
	 */
		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		/* Report the block as delayed-allocated: mapped to the
		 * sentinel "invalid" block number, new, and delayed. */
		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/* Cache the looked-up mapping in the extents status tree. */
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * first, we need to know whether the block is allocated already
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

/* Take an extra reference on a buffer_head; used via
 * ext4_walk_page_buffers() to pin page buffers across a page unlock. */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

/* Drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

/*
 * Write out a page in data=journal mode: journal the page's buffers (or
 * the inline data) instead of submitting bio-based writeback.
 */
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		/* Inline data lives in the inode body; only page 0 can
		 * carry it, and it must fit the inline area. */
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		/* Pin the buffers (handle is NULL here; bget_one ignores
		 * it) so they survive the upcoming page unlock. */
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
*/ get_page(page); unlock_page(page); handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_writepage_trans_blocks(inode)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); put_page(page); goto out_no_pagelock; } BUG_ON(!ext4_handle_valid(handle)); lock_page(page); put_page(page); if (page->mapping != mapping) { /* The page got truncated from under us */ ext4_journal_stop(handle); ret = 0; goto out; } if (inline_data) { ret = ext4_mark_inode_dirty(handle, inode); } else { ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, do_journal_get_write_access); err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, write_end_fn); } if (ret == 0) ret = err; EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; err = ext4_journal_stop(handle); if (!ret) ret = err; if (!ext4_has_inline_data(inode)) ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, bput_one); ext4_set_inode_state(inode, EXT4_STATE_JDATA); out: unlock_page(page); out_no_pagelock: brelse(inode_bh); return ret; } /* * Note that we don't need to start a transaction unless we're journaling data * because we should have holes filled from ext4_page_mkwrite(). We even don't * need to file the inode to the transaction's list in ordered mode because if * we are writing back data added by write(), the inode is already there and if * we are writing back data modified via mmap(), no one guarantees in which * transaction the data will hit the disk. In case we are journaling data, we * cannot start transaction directly because transaction start ranks above page * lock so we have to do some magic. * * This function can get called via... * - ext4_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) * - shrink_page_list via the kswapd/direct reclaim (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) * * We don't do any block allocation in this function. 
 If we have page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmaped based write. So if we do with blocksize 1K
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * we have in the page first buffer_head mapped via page_mkwrite call back
 * but other buffer_heads would be unmapped but dirty (dirty done via the
 * do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that is either delayed or
 * unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * Page also has the dirty flag cleared so we don't get recursive page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;
	bool keep_towrite = false;

	/* Filesystem was shut down: fail the write and drop the page lock */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		ext4_invalidatepage(page, 0, PAGE_SIZE);
		unlock_page(page);
		return -EIO;
	}

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	/* Only write out the part of the last page that is inside i_size */
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function. If there are buffers needing that, we have to redirty
	 * the page. But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 *
	 * Also, if there is only one buffer per page (the fs block
	 * size == the page size), if one buffer needs block
	 * allocation or needs to modify the extent tree to clear the
	 * unwritten flag, we know that the page can't be written at
	 * all, so we might as well refuse the write immediately.
	 * Unfortunately if the block size != page size, we can't as
	 * easily detect this case using ext4_walk_page_buffers(), but
	 * for the extremely common case, this is an optimization that
	 * skips a useless round trip through ext4_bio_write_page().
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if ((current->flags & PF_MEMALLOC) ||
		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers. So just bail out. Warn if we came here
			 * from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
		keep_towrite = true;
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	ext4_io_submit_init(&io_submit, wbc);
	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_submit.io_end) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return -ENOMEM;
	}
	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
	ext4_io_submit(&io_submit);
	/* Drop io_end reference we got from init */
	ext4_put_io_end_defer(io_submit.io_end);
	return ret;
}

/*
 * Submit mpd->first_page for IO via mpd->io_submit and advance
 * mpd->first_page.  Must be called with the page locked.
 */
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
	int len;
	loff_t size;
	int err;

	BUG_ON(page->index != mpd->first_page);
	clear_page_dirty_for_io(page);
	/*
	 * We have to be very careful here!  Nothing protects writeback path
	 * against i_size changes and the page can be writeably mapped into
	 * page tables.
 So an application can be growing i_size and writing
 * data through mmap while writeback runs. clear_page_dirty_for_io()
 * write-protects our page in page tables and the page cannot get
 * written to again until we release page lock. So only after
 * clear_page_dirty_for_io() we are safe to sample i_size for
 * ext4_bio_write_page() to zero-out tail of the written page. We rely
 * on the barrier provided by TestClearPageDirty in
 * clear_page_dirty_for_io() to make sure i_size is really sampled only
 * after page tables are updated.
 */
	size = i_size_read(mpd->inode);
	/* Partial last page: only write the in-file part */
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(mpd->inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
	if (!err)
		mpd->wbc->nr_to_write--;
	mpd->first_page++;

	return err;
}

/* Buffer states that require block mapping before writeback */
#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contig. blocks in the same state. If the
 * buffer doesn't require mapping for writeback and we haven't started the
 * extent of buffers to map yet, the function returns 'true' immediately - the
 * caller can write the buffer right away. Otherwise the function returns true
 * if the block has been added to the extent, false if the block couldn't be
 * added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		/* We cannot map unless handle is started... */
		if (!mpd->do_map)
			return false;
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh upto @head (exclusive) and either submit
 * the page for IO if all buffers in this page were mapped and there's no
 * accumulated extent of buffers to map or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return value
 * < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	/* Number of file blocks inside i_size, rounded up */
	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
							>> inode->i_blkbits;

	if (ext4_verity_in_progress(inode))
		blocks = EXT_MAX_BLOCKS;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map?
 */
			if (mpd->map.m_len)
				return 0;
			/* Buffer needs mapping and handle is not started? */
			if (!mpd->do_map)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_page(mpd, head->b_page);
		if (err < 0)
			return err;
	}
	return lblk < blocks;
}

/*
 * mpage_process_page - update page buffers corresponding to changed extent
 *			and may submit fully mapped page for IO
 *
 * @mpd		- description of extent to map, on return next extent to map
 * @m_lblk	- logical block mapping.
 * @m_pblk	- corresponding physical mapping.
 * @map_bh	- determines on return whether this page requires any further
 *		  mapping or not.
 * Scan given page buffers corresponding to changed extent and update buffer
 * state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits.
 * If the given page is not fully mapped, we update @map to the next extent in
 * the given page that needs mapping & return @map_bh as true.
 */
static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
			      bool *map_bh)
{
	struct buffer_head *head, *bh;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	ext4_lblk_t lblk = *m_lblk;
	ext4_fsblk_t pblock = *m_pblk;
	int err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ssize_t io_end_size = 0;
	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);

	bh = head = page_buffers(page);
	do {
		if (lblk < mpd->map.m_lblk)
			continue;
		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
			/*
			 * Buffer after end of mapped extent.
			 * Find next buffer in the page to map.
			 */
			mpd->map.m_len = 0;
			mpd->map.m_flags = 0;
			/* Close out the io_end_vec covered so far */
			io_end_vec->size += io_end_size;
			io_end_size = 0;

			err = mpage_process_page_bufs(mpd, head, bh, lblk);
			if (err > 0)
				err = 0;
			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
				io_end_vec = ext4_alloc_io_end_vec(io_end);
				io_end_vec->offset = mpd->map.m_lblk << blkbits;
			}
			*map_bh = true;
			goto out;
		}
		if (buffer_delay(bh)) {
			clear_buffer_delay(bh);
			bh->b_blocknr = pblock++;
		}
		clear_buffer_unwritten(bh);
		io_end_size += (1 << blkbits);
	} while (lblk++, (bh = bh->b_this_page) != head);

	io_end_vec->size += io_end_size;
	io_end_size = 0;
	*map_bh = false;
out:
	/* Report where the scan stopped so the caller can continue from here */
	*m_lblk = lblk;
	*m_pblk = pblock;
	return err;
}

/*
 * mpage_map_buffers - update buffers corresponding to changed extent and
 *		       submit fully mapped pages for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 *
 * Scan buffers corresponding to changed extent (we expect corresponding pages
 * to be already locked) and update buffer state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
 * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not fully
 * mapped, we update @map to the next extent in the last page that needs
 * mapping. Otherwise we submit the page for IO.
 */
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
	struct pagevec pvec;
	int nr_pages, i;
	struct inode *inode = mpd->inode;
	/* Shift converting logical block numbers to page indexes */
	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
	pgoff_t start, end;
	ext4_lblk_t lblk;
	ext4_fsblk_t pblock;
	int err;
	bool map_bh = false;

	start = mpd->map.m_lblk >> bpp_bits;
	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
	lblk = start << bpp_bits;
	pblock = mpd->map.m_pblk;

	pagevec_init(&pvec);
	while (start <= end) {
		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
						&start, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			err = mpage_process_page(mpd, page, &lblk, &pblock,
						 &map_bh);
			/*
			 * If map_bh is true, means page may require further bh
			 * mapping, or maybe the page was submitted for IO.
			 * So we return to call further extent mapping.
			 */
			if (err < 0 || map_bh == true)
				goto out;
			/* Page fully mapped - let IO run! */
			err = mpage_submit_page(mpd, page);
			if (err < 0)
				goto out;
		}
		pagevec_release(&pvec);
	}
	/* Extent fully mapped and matches with page boundary. We are done. */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

/*
 * Map/allocate the blocks described by mpd->map with a single
 * ext4_map_blocks() call.  Uses reserved space so that writeback cannot
 * fail on metadata allocation.  Returns 0 or a negative error.
 */
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int get_blocks_flags;
	int err, dioread_nolock;

	trace_ext4_da_write_pages_extent(inode, map);
	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
	 * to convert an unwritten extent to be initialized (in the case
	 * where we have written into one or more preallocated blocks). It is
	 * possible that we're going to need more metadata blocks than
	 * previously reserved. However we must not fail because we're in
	 * writeback and there is nothing we can do about it so it might result
	 * in data loss. So use reserved blocks to allocate metadata if
	 * possible.
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
	 * the blocks in question are delalloc blocks. This indicates
	 * that the blocks and quotas has already been checked when
	 * the data was copied into the page cache.
	 */
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
			   EXT4_GET_BLOCKS_IO_SUBMIT;
	dioread_nolock = ext4_should_dioread_nolock(inode);
	if (dioread_nolock)
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (map->m_flags & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
	if (err < 0)
		return err;
	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
		/*
		 * Hand the reserved handle to the io_end so that unwritten
		 * extent conversion after IO completion can use it.
		 */
		if (!mpd->io_submit.io_end->handle &&
		    ext4_handle_valid(handle)) {
			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
			handle->h_rsv_handle = NULL;
		}
		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
	}

	BUG_ON(map->m_len == 0);
	return 0;
}

/*
 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
 *				 mpd->len and submit pages underlying it for IO
 *
 * @handle - handle for journal operations
 * @mpd - extent to map
 * @give_up_on_write - we set this to true iff there is a fatal error and there
 *                     is no hope of writing the data. The caller should discard
 *                     dirty pages to avoid infinite loops.
 *
 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
 * delayed, blocks are allocated, if it is unwritten, we may need to convert
 * them to initialized or split the described range from larger unwritten
 * extent. Note that we need not map all the described range since allocation
 * can return less blocks or the range is covered by more unwritten extents. We
 * cannot map more because we are limited by reserved transaction credits. On
 * the other hand we always make sure that the last touched page is fully
 * mapped so that it can be written out (and thus forward progress is
 * guaranteed). After mapping we submit all mapped pages for IO.
 */
static int mpage_map_and_submit_extent(handle_t *handle,
				       struct mpage_da_data *mpd,
				       bool *give_up_on_write)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int err;
	loff_t disksize;
	int progress = 0;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	struct ext4_io_end_vec *io_end_vec = ext4_alloc_io_end_vec(io_end);

	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
	do {
		err = mpage_map_one_extent(handle, mpd);
		if (err < 0) {
			struct super_block *sb = inode->i_sb;

			/* Filesystem is gone - discard the dirty pages */
			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
			    EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
				goto invalidate_dirty_pages;
			/*
			 * Let the upper layers retry transient errors.
			 * In the case of ENOSPC, if ext4_count_free_blocks()
			 * is non-zero, a commit should free up blocks.
			 */
			if ((err == -ENOMEM) ||
			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
				if (progress)
					goto update_disksize;
				return err;
			}
			ext4_msg(sb, KERN_CRIT,
				 "Delayed block allocation failed for "
				 "inode %lu at logical offset %llu with"
				 " max blocks %u with error %d",
				 inode->i_ino,
				 (unsigned long long)map->m_lblk,
				 (unsigned)map->m_len, -err);
			ext4_msg(sb, KERN_CRIT,
				 "This should not happen!! Data will "
				 "be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(inode);
		invalidate_dirty_pages:
			*give_up_on_write = true;
			return err;
		}
		progress = 1;
		/*
		 * Update buffer state, submit mapped pages, and get us new
		 * extent to map
		 */
		err = mpage_map_and_submit_buffers(mpd);
		if (err < 0)
			goto update_disksize;
	} while (map->m_len);

update_disksize:
	/*
	 * Update on-disk size after IO is submitted.  Races with
	 * truncate are avoided by checking i_size under i_data_sem.
	 */
	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
	if (disksize > EXT4_I(inode)->i_disksize) {
		int err2;
		loff_t i_size;

		down_write(&EXT4_I(inode)->i_data_sem);
		i_size = i_size_read(inode);
		/* Never extend i_disksize beyond the current i_size */
		if (disksize > i_size)
			disksize = i_size;
		if (disksize > EXT4_I(inode)->i_disksize)
			EXT4_I(inode)->i_disksize = disksize;
		up_write(&EXT4_I(inode)->i_data_sem);
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (err2)
			ext4_error(inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   inode->i_ino);
		if (!err)
			err = err2;
	}
	return err;
}

/*
 * Calculate the total number of credits to reserve for one writepages
 * iteration. This is called from ext4_writepages(). We map an extent of
 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
 * bpp - 1 blocks in bpp different extents.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);

	return ext4_meta_trans_blocks(inode,
				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
}

/*
 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
 *				 and underlying extent to map
 *
 * @mpd - where to look for pages
 *
 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
 * IO immediately. When we find a page which isn't mapped we start accumulating
 * extent of buffers underlying these pages that needs mapping (formed by
 * either delayed or unwritten buffers). We also lock the pages containing
 * these buffers. The extent found is returned in @mpd structure (starting at
 * mpd->lblk with length mpd->len blocks).
 *
 * Note that this function can attach bios to one io_end structure which are
 * neither logically nor physically contiguous. Although it may seem as an
 * unnecessary complication, it is actually inevitable in blocksize < pagesize
 * case as we need to track IO to all buffers underlying a page in one io_end.
 */
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
	struct address_space *mapping = mpd->inode->i_mapping;
	struct pagevec pvec;
	unsigned int nr_pages;
	long left = mpd->wbc->nr_to_write;
	pgoff_t index = mpd->first_page;
	pgoff_t end = mpd->last_page;
	xa_mark_t tag;
	int i, err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ext4_lblk_t lblk;
	struct buffer_head *head;

	/* Integrity sync walks TOWRITE-tagged pages; background sync DIRTY */
	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	pagevec_init(&pvec);
	mpd->map.m_len = 0;
	mpd->next_page = index;
	while (index <= end) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			goto out;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * Accumulated enough dirty pages? This doesn't apply
			 * to WB_SYNC_ALL mode. For integrity sync we have to
			 * keep going because someone may be concurrently
			 * dirtying pages, and we might have synced a lot of
			 * newly appeared dirty pages, but have not synced all
			 * of the old dirty pages.
			 */
			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
				goto out;

			/* If we can't merge this page, we are done. */
			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
				goto out;

			lock_page(page);
			/*
			 * If the page is no longer dirty, or its mapping no
			 * longer corresponds to inode we are writing (which
			 * means it has been truncated or invalidated), or the
			 * page is already under writeback and we are not doing
			 * a data integrity writeback, skip the page
			 */
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
			    unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			if (mpd->map.m_len == 0)
				mpd->first_page = page->index;
			mpd->next_page = page->index + 1;
			/* Add all dirty buffers to mpd */
			lblk = ((ext4_lblk_t)page->index) <<
				(PAGE_SHIFT - blkbits);
			head = page_buffers(page);
			err = mpage_process_page_bufs(mpd, head, head, lblk);
			if (err <= 0)
				goto out;
			err = 0;
			left--;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

/*
 * ext4_writepages - write back a range of the inode's dirty pages
 *
 * Main delalloc writeback entry point: repeatedly collects extents of dirty
 * pages, maps/allocates their blocks under a journal transaction and submits
 * them for IO.  Journalled-data inodes fall back to generic_writepages().
 */
static int ext4_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	pgoff_t	writeback_index = 0;
	long nr_to_write = wbc->nr_to_write;
	int range_whole = 0;
	int cycled = 1;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int needed_blocks, rsv_blocks = 0, ret = 0;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
	bool done;
	struct blk_plug plug;
	bool give_up_on_write = false;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_journal_flag_rwsem);
	trace_ext4_writepages(inode, wbc);

	/*
	 * No pages to write?
 This is mainly a kludge to avoid starting
	 * a transaction for special inodes like journal inode on last iput()
	 * because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_writepages;

	if (ext4_should_journal_data(inode)) {
		ret = generic_writepages(mapping, wbc);
		goto out_writepages;
	}

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
		     sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
		ret = -EROFS;
		goto out_writepages;
	}

	/*
	 * If we have inline data and arrive here, it means that
	 * we will soon create the block for the 1st page, so
	 * we'd better clear the inline data here.
	 */
	if (ext4_has_inline_data(inode)) {
		/* Just inode will be modified... */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_writepages;
		}
		BUG_ON(ext4_test_inode_state(inode,
				EXT4_STATE_MAY_INLINE_DATA));
		ext4_destroy_inline_data(handle, inode);
		ext4_journal_stop(handle);
	}

	if (ext4_should_dioread_nolock(inode)) {
		/*
		 * We may need to convert up to one extent per block in
		 * the page and we may dirty the inode.
		 */
		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
						PAGE_SIZE >> inode->i_blkbits);
	}

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	if (wbc->range_cyclic) {
		/* Resume from where the previous cyclic pass stopped */
		writeback_index = mapping->writeback_index;
		if (writeback_index)
			cycled = 0;
		mpd.first_page = writeback_index;
		mpd.last_page = -1;
	} else {
		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
	}

	mpd.inode = inode;
	mpd.wbc = wbc;
	ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
	done = false;
	blk_start_plug(&plug);

	/*
	 * First writeback pages that don't need mapping - we can avoid
	 * starting a transaction unnecessarily and also avoid being blocked
	 * in the block layer on device congestion while having transaction
	 * started.
	 */
	mpd.do_map = 0;
	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
	if (!mpd.io_submit.io_end) {
		ret = -ENOMEM;
		goto unplug;
	}
	ret = mpage_prepare_extent_to_map(&mpd);
	/* Unlock pages we didn't use */
	mpage_release_unused_pages(&mpd, false);
	/* Submit prepared bio */
	ext4_io_submit(&mpd.io_submit);
	ext4_put_io_end_defer(mpd.io_submit.io_end);
	mpd.io_submit.io_end = NULL;
	if (ret < 0)
		goto unplug;

	while (!done && mpd.first_page <= mpd.last_page) {
		/* For each extent of pages we use new io_end */
		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
		if (!mpd.io_submit.io_end) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * We have two constraints: We find one extent to map and we
		 * must always write out whole page (makes a difference when
		 * blocksize < pagesize) so that we don't block on IO when we
		 * try to write out the rest of the page. Journalled mode is
		 * not supported by delalloc.
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start_with_reserve(inode,
				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			/* Release allocated io_end */
			ext4_put_io_end(mpd.io_submit.io_end);
			mpd.io_submit.io_end = NULL;
			break;
		}
		mpd.do_map = 1;

		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
		ret = mpage_prepare_extent_to_map(&mpd);
		if (!ret) {
			if (mpd.map.m_len)
				ret = mpage_map_and_submit_extent(handle, &mpd,
					&give_up_on_write);
			else {
				/*
				 * We scanned the whole range (or exhausted
				 * nr_to_write), submitted what was mapped and
				 * didn't find anything needing mapping. We are
				 * done.
				 */
				done = true;
			}
		}
		/*
		 * Caution: If the handle is synchronous,
		 * ext4_journal_stop() can wait for transaction commit
		 * to finish which may depend on writeback of pages to
		 * complete or on page lock to be released.  In that
		 * case, we have to wait until after we have
		 * submitted all the IO, released page locks we hold,
		 * and dropped io_end reference (for extent conversion
		 * to be able to complete) before stopping the handle.
		 */
		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
			ext4_journal_stop(handle);
			handle = NULL;
			mpd.do_map = 0;
		}
		/* Unlock pages we didn't use */
		mpage_release_unused_pages(&mpd, give_up_on_write);
		/* Submit prepared bio */
		ext4_io_submit(&mpd.io_submit);

		/*
		 * Drop our io_end reference we got from init. We have
		 * to be careful and use deferred io_end finishing if
		 * we are still holding the transaction as we can
		 * release the last reference to io_end which may end
		 * up doing unwritten extent conversion.
		 */
		if (handle) {
			ext4_put_io_end_defer(mpd.io_submit.io_end);
			ext4_journal_stop(handle);
		} else
			ext4_put_io_end(mpd.io_submit.io_end);
		mpd.io_submit.io_end = NULL;

		if (ret == -ENOSPC && sbi->s_journal) {
			/*
			 * Commit the transaction which would
			 * free blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
			continue;
		}
		/* Fatal error - ENOMEM, EIO... */
		if (ret)
			break;
	}
unplug:
	blk_finish_plug(&plug);
	/* Cyclic writeback: wrap around to the start of the file once */
	if (!ret && !cycled && wbc->nr_to_write > 0) {
		cycled = 1;
		mpd.last_page = writeback_index - 1;
		mpd.first_page = 0;
		goto retry;
	}

	/* Update index */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * Set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = mpd.first_page;

out_writepages:
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_journal_flag_rwsem);
	return ret;
}

/*
 * Writeback entry point for DAX inodes: flush the mapping range through
 * dax_writeback_mapping_range() instead of the buffered IO path.
 */
static int ext4_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	int ret;
	long nr_to_write = wbc->nr_to_write;
	struct inode *inode = mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_journal_flag_rwsem);
	trace_ext4_writepages(inode, wbc);

	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_journal_flag_rwsem);
	return ret;
}

/*
 * Decide whether to fall back from delayed allocation to immediate block
 * allocation because free space is running low.  Returns 1 to switch to
 * non-delalloc mode, 0 to keep using delalloc.
 */
static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_clusters, dirty_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * switch to non delalloc mode if we are running low
	 * on free block. The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating global counters
	 * Delalloc need an accurate free block accounting.
 So switch
	 * to non delalloc when we are near to error range.
	 */
	free_clusters =
		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	dirty_clusters =
		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	/*
	 * Start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);

	if (2 * free_clusters < 3 * dirty_clusters ||
	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks is less than watermark
		 */
		return 1;
	}
	return 0;
}

/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
	if (likely(ext4_has_feature_large_file(inode->i_sb)))
		return 1;

	if (pos + len <= 0x7fffffffULL)
		return 1;

	/* We might need to update the superblock to set LARGE_FILE */
	return 2;
}

/*
 * write_begin for delayed-allocation writes: grab and prepare the page,
 * reserving space via ext4_da_get_block_prep() instead of allocating
 * blocks.  Falls back to the non-delalloc path (ext4_write_begin) when
 * free space is low, for symlinks, or while verity is being enabled;
 * the chosen mode is recorded in *fsdata for ext4_da_write_end().
 */
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	index = pos >> PAGE_SHIFT;

	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
	    ext4_verity_in_progress(inode)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_da_write_inline_data_begin(mapping, inode,
						      pos, len, flags,
						      pagep, fsdata);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journal the i_disksize update if writes to the end
	 * of file which has an already mapped buffer.
	 */
retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				ext4_da_write_credits(inode, pos, len));
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	ret = ext4_block_write_begin(page, pos, len,
				     ext4_da_get_block_prep);
#else
	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;

		put_page(page);
		return ret;
	}

	*pagep = page;
	return ret;
}

/*
 * Check if we should update i_disksize
 * when write to the end of file but not require block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	/* Walk to the buffer covering @offset within the page */
	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}

/*
 * write_end for delayed-allocation writes: update i_disksize when the
 * write extends past it and the tail buffer is already mapped, then
 * finish through generic_write_end() (or the inline-data path) and stop
 * the handle started by ext4_da_write_begin().
 */
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC)
		return ext4_write_end(file, mapping, pos,
				      len, copied, page, fsdata);

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
*/ new_i_size = pos + copied; if (copied && new_i_size > EXT4_I(inode)->i_disksize) { if (ext4_has_inline_data(inode) || ext4_da_should_update_i_disksize(page, end)) { ext4_update_i_disksize(inode, new_i_size); /* We need to mark inode dirty even if * new_i_size is less that inode->i_size * bu greater than i_disksize.(hint delalloc) */ ext4_mark_inode_dirty(handle, inode); } } if (write_mode != CONVERT_INLINE_DATA && ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && ext4_has_inline_data(inode)) ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, page); else ret2 = generic_write_end(file, mapping, pos, len, copied, page, fsdata); copied = ret2; if (ret2 < 0) ret = ret2; ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; return ret ? ret : copied; } /* * Force all delayed allocation blocks to be allocated for a given inode. */ int ext4_alloc_da_blocks(struct inode *inode) { trace_ext4_alloc_da_blocks(inode); if (!EXT4_I(inode)->i_reserved_data_blocks) return 0; /* * We do something simple for now. The filemap_flush() will * also start triggering a write of the data blocks, which is * not strictly speaking necessary (and for users of * laptop_mode, not even desirable). However, to do otherwise * would require replicating code paths in: * * ext4_writepages() -> * write_cache_pages() ---> (via passed in callback function) * __mpage_da_writepage() --> * mpage_add_bh_to_extent() * mpage_da_map_blocks() * * The problem is that write_cache_pages(), located in * mm/page-writeback.c, marks pages clean in preparation for * doing I/O, which is not desirable if we're not planning on * doing I/O at all. * * We could call write_cache_pages(), and then redirty all of * the pages by calling redirty_page_for_writepage() but that * would be ugly in the extreme. 
	   So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	/*
	 * We can get here for an inline file via the FIBMAP ioctl
	 */
	if (ext4_has_inline_data(inode))
		return 0;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}

/*
 * ->readpage: serve inline-data files directly, otherwise fall through to
 * the multi-page read path for a single page.
 */
static int ext4_readpage(struct file *file, struct page *page)
{
	int ret = -EAGAIN;
	struct inode *inode = page->mapping->host;

	trace_ext4_readpage(page);

	if (ext4_has_inline_data(inode))
		ret = ext4_readpage_inline(inode, page);

	/* -EAGAIN means "not inline (or inline read punted)"; do a real read */
	if (ret == -EAGAIN)
		return ext4_mpage_readpages(page->mapping, NULL, page, 1,
						false);

	return ret;
}

/*
 * ->readpages: batched readahead entry point.
 */
static int ext4_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;

	/* If the file has inline data, no need to do readpages. */
	if (ext4_has_inline_data(inode))
		return 0;

	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}

/*
 * ->invalidatepage for the non-journalled aops: plain buffer invalidation.
 */
static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	trace_ext4_invalidatepage(page, offset, length);

	/* No journalling happens on data buffers when this function is used */
	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));

	block_invalidatepage(page, offset, length);
}

static int __ext4_journalled_invalidatepage(struct page *page,
					    unsigned int offset,
					    unsigned int length)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_journalled_invalidatepage(page, offset, length);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0 && length == PAGE_SIZE)
		ClearPageChecked(page);

	return jbd2_journal_invalidatepage(journal, page, offset, length);
}

/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
					   unsigned int offset,
					   unsigned int length)
{
	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}

/*
 * ->releasepage: try to free the page's buffers so the page itself can be
 * released.  Refuses pages carrying pending journalled dirty data.
 */
static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	/* Page has dirty journalled data -> cannot release */
	if (PageChecked(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}

/*
 * Is there metadata relevant to fdatasync() that has not reached stable
 * storage yet?  With a journal this is "has the datasync transaction
 * committed"; without, fall back to inode dirty state.
 */
static bool ext4_inode_datasync_dirty(struct inode *inode)
{
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;

	if (journal)
		return !jbd2_transaction_committed(journal,
					EXT4_I(inode)->i_datasync_tid);

	/* Any metadata buffers to write?
	 */
	if (!list_empty(&inode->i_mapping->private_list))
		return true;
	return inode->i_state & I_DIRTY_DATASYNC;
}

/*
 * Translate an ext4 block mapping into an iomap descriptor for the given
 * byte range: sets flags (DIRTY/NEW), device, offset/length, and the
 * mapping type (UNWRITTEN / MAPPED / HOLE).
 */
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
			   struct ext4_map_blocks *map, loff_t offset,
			   loff_t length)
{
	u8 blkbits = inode->i_blkbits;

	/*
	 * Writes that span EOF might trigger an I/O size update on completion,
	 * so consider them to be dirty for the purpose of O_DSYNC, even if
	 * there is no other metadata changes being made or are pending.
	 */
	iomap->flags = 0;
	if (ext4_inode_datasync_dirty(inode) ||
	    offset + length > i_size_read(inode))
		iomap->flags |= IOMAP_F_DIRTY;

	if (map->m_flags & EXT4_MAP_NEW)
		iomap->flags |= IOMAP_F_NEW;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
	iomap->offset = (u64) map->m_lblk << blkbits;
	iomap->length = (u64) map->m_len << blkbits;

	/*
	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
	 * set.  In order for any allocated unwritten extents to be converted
	 * into written extents correctly within the ->end_io() handler, we
	 * need to ensure that the iomap->type is set appropriately.  Hence, the
	 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has
	 * been set first.
	 */
	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
		iomap->type = IOMAP_UNWRITTEN;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else if (map->m_flags & EXT4_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	}
}

/*
 * Allocate blocks for an IOMAP_WRITE request (DAX or direct I/O), inside
 * a journal handle, retrying on transient ENOSPC.
 */
static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
			    unsigned int flags)
{
	handle_t *handle;
	u8 blkbits = inode->i_blkbits;
	int ret, dio_credits, m_flags = 0, retries = 0;

	/*
	 * Trim the mapping request to the maximum value that we can map at
	 * once for direct I/O.
	 */
	if (map->m_len > DIO_MAX_BLOCKS)
		map->m_len = DIO_MAX_BLOCKS;
	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);

retry:
	/*
	 * Either we allocate blocks and then don't get an unwritten extent, so
	 * in that case we have reserved enough credits.  Or, the blocks are
	 * already allocated and unwritten.  In that case, the extent conversion
	 * fits into the credits as well.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/*
	 * DAX and direct I/O are the only two operations that are currently
	 * supported with IOMAP_WRITE.
	 */
	WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
	if (IS_DAX(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
	/*
	 * We use i_size instead of i_disksize here because delalloc writeback
	 * can complete at any point during the I/O and subsequently push the
	 * i_disksize out to i_size.  This could be beyond where direct I/O is
	 * happening and thus expose allocated blocks to direct I/O reads.
	 */
	else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;

	ret = ext4_map_blocks(handle, inode, map, m_flags);

	/*
	 * We cannot fill holes in indirect tree based inodes as that could
	 * expose stale data in the case of a crash.  Use the magic error code
	 * to fallback to buffered I/O.
	 */
	if (!m_flags && !ret)
		ret = -ENOTBLK;

	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	return ret;
}

/*
 * iomap_begin for the I/O path: maps (and for writes, allocates) the
 * requested range and fills in @iomap.
 */
static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
		return -ERANGE;

	/*
	 * Calculate the first and last logical blocks respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	if (flags & IOMAP_WRITE)
		ret = ext4_iomap_alloc(inode, &map, flags);
	else
		ret = ext4_map_blocks(NULL, inode, &map, 0);

	if (ret < 0)
		return ret;

	ext4_set_iomap(inode, iomap, &map, offset, length);

	return 0;
}

static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	/*
	 * Check to see whether an error occurred while writing out the data to
	 * the allocated blocks.  If so, return the magic error code so that we
	 * fallback to buffered I/O and attempt to complete the remainder of
	 * the I/O.  Any blocks that may have been allocated in preparation for
	 * the direct I/O will be reused during buffered I/O.
	 */
	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
		return -ENOTBLK;

	return 0;
}

const struct iomap_ops ext4_iomap_ops = {
	.iomap_begin		= ext4_iomap_begin,
	.iomap_end		= ext4_iomap_end,
};

/*
 * Check the extent status tree for a delayed extent within @map's range.
 * May trim map->m_len so the reported mapping stops where the delalloc
 * state changes.  Returns true if the range start is delayed-allocated.
 */
static bool ext4_iomap_is_delalloc(struct inode *inode,
				   struct ext4_map_blocks *map)
{
	struct extent_status es;
	ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
				  map->m_lblk, end, &es);

	if (!es.es_len || es.es_lblk > end)
		return false;

	if (es.es_lblk > map->m_lblk) {
		/* delayed extent begins after the start: trim to the gap */
		map->m_len = es.es_lblk - map->m_lblk;
		return false;
	}

	offset = map->m_lblk - es.es_lblk;
	map->m_len = es.es_len - offset;

	return true;
}

/*
 * iomap_begin for the reporting path (e.g. fiemap/seek): read-only mapping
 * lookup that also classifies delalloc ranges.
 */
static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
				   loff_t length, unsigned int flags,
				   struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	bool delalloc = false;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (ext4_has_inline_data(inode)) {
		ret = ext4_inline_data_iomap(inode, iomap);
		if (ret != -EAGAIN) {
			if (ret == 0 && offset >= iomap->length)
				ret = -ENOENT;
			return ret;
		}
	}

	/*
	 * Calculate the first and last logical block respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret == 0)
		delalloc = ext4_iomap_is_delalloc(inode, &map);

	ext4_set_iomap(inode, iomap, &map, offset, length);
	if (delalloc && iomap->type == IOMAP_HOLE)
		iomap->type = IOMAP_DELALLOC;

	return 0;
}

const struct iomap_ops ext4_iomap_report_ops = {
	.iomap_begin = ext4_iomap_begin_report,
};

/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	/* "pending dirty": resolved into buffer state at writepage time */
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static int ext4_set_page_dirty(struct page *page)
{
	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
	WARN_ON_ONCE(!page_has_buffers(page));
	return __set_page_dirty_buffers(page);
}

/* aops for ordered/writeback data mode, no delalloc */
static const struct address_space_operations ext4_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_write_end,
	.set_page_dirty		= ext4_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* aops for data=journal mode */
static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_journalled_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* aops for delayed allocation */
static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.set_page_dirty		= ext4_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};

/* aops for DAX inodes: no page cache, most hooks are no-ops */
static const struct address_space_operations ext4_dax_aops = {
	.writepages		= ext4_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= noop_invalidatepage,
};

/*
 * Select the address_space_operations table for @inode based on its
 * journalling mode and mount options (DAX, delalloc).
 */
void ext4_set_aops(struct inode *inode)
{
	switch (ext4_inode_journal_mode(inode)) {
	case EXT4_INODE_ORDERED_DATA_MODE:
	case EXT4_INODE_WRITEBACK_DATA_MODE:
		break;
	case EXT4_INODE_JOURNAL_DATA_MODE:
		inode->i_mapping->a_ops = &ext4_journalled_aops;
		return;
	default:
		BUG();
	}
	if (IS_DAX(inode))
		inode->i_mapping->a_ops = &ext4_dax_aops;
	else if (test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else
		inode->i_mapping->a_ops = &ext4_aops;
}

/*
 * Zero @length bytes starting at file offset @from within a single block,
 * going through the page cache and, where required, the journal.
 */
static int __ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	ext4_fsblk_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;

	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}
	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped.  Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt.
		 */
		if (!buffer_uptodate(bh))
			goto unlock;
		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
			/* We expect the key to be set. */
			BUG_ON(!fscrypt_has_encryption_key(inode));
			/* decrypt in place so the zeroing hits plaintext */
			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
					page, blocksize, bh_offset(bh)));
		}
	}
	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}
	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		err = 0;
		mark_buffer_dirty(bh);
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_inode_add_write(handle, inode, from,
					length);
	}

unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'.  The range to be zero'd must
 * be contained with in one block.  If the specified range exceeds
 * the end of the block it will be shortened to end of the block
 * that corresponds to 'from'
 */
static int ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	struct inode *inode = mapping->host;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned max = blocksize - (offset & (blocksize - 1));

	/*
	 * correct length if it does not fall between
	 * 'from' and the end of the block
	 */
	if (length > max || length < 0)
		length = max;

	if (IS_DAX(inode)) {
		return iomap_zero_range(inode, from, length, NULL,
					&ext4_iomap_ops);
	}
	return __ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
static int ext4_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned length;
	unsigned blocksize;
	struct inode *inode = mapping->host;

	/* If we are processing an encrypted inode during orphan list handling */
	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));

	return ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * Zero the partial (non-block-aligned) head and tail of the byte range
 * [lstart, lstart+length).  Full blocks in the middle are left alone —
 * the caller removes those separately.
 */
int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
			     loff_t lstart, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	unsigned partial_start, partial_end;
	ext4_fsblk_t start, end;
	loff_t byte_end = (lstart + length - 1);
	int err = 0;

	partial_start = lstart & (sb->s_blocksize - 1);
	partial_end = byte_end & (sb->s_blocksize - 1);

	start = lstart >> sb->s_blocksize_bits;
	end = byte_end >> sb->s_blocksize_bits;

	/* Handle partial zero within the single block */
	if (start == end &&
	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, length);
		return err;
	}
	/* Handle partial zero out on the start of the range */
	if (partial_start) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, sb->s_blocksize);
		if (err)
			return err;
	}
	/* Handle partial zero out on the end of the range */
	if (partial_end != sb->s_blocksize - 1)
		err = ext4_block_zero_page_range(handle, mapping,
						 byte_end - partial_end,
						 partial_end + 1);
	return err;
}

/*
 * Can this inode type be truncated?  Fast symlinks keep their target in
 * the inode itself and cannot be.
 */
int ext4_can_truncate(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * We have to make sure i_disksize gets properly updated before we truncate
 * page cache due to hole punching or zero range.
	 Otherwise i_disksize update
 * can get lost as it may have been postponed to submission of writeback but
 * that will never happen after we truncate page cache.
 */
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
				      loff_t len)
{
	handle_t *handle;
	loff_t size = i_size_read(inode);

	WARN_ON(!inode_is_locked(inode));
	/* only relevant if the punched range straddles i_size */
	if (offset > size || offset + len < size)
		return 0;

	if (EXT4_I(inode)->i_disksize >= size)
		return 0;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ext4_update_i_disksize(inode, size);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	return 0;
}

/* Drop i_mmap_sem while waiting for a busy DAX page, then re-take it. */
static void ext4_wait_dax_page(struct ext4_inode_info *ei)
{
	up_write(&ei->i_mmap_sem);
	schedule();
	down_write(&ei->i_mmap_sem);
}

/*
 * Wait until no DAX page in this mapping is pinned by an ongoing access.
 * Caller must hold i_mmap_sem for write.
 */
int ext4_break_layouts(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct page *page;
	int error;

	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
		return -EINVAL;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;

		error = ___wait_var_event(&page->_refcount,
				atomic_read(&page->_refcount) == 1,
				TASK_INTERRUPTIBLE, 0, 0,
				ext4_wait_dax_page(ei));
	} while (error == 0);

	return error;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 *
 * Lock ordering here: i_mutex -> i_mmap_sem -> journal handle ->
 * i_data_sem; unwind labels release in reverse.
 */
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	if (ext4_has_inline_data(inode)) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_convert_inline_data(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			return ret;
	}

	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		    PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
		    offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_dio;

	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero block aligned part of pages*/
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are blocks to remove, do it */
	if (stop_block > first_block) {

		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);

		ret = ext4_es_remove_extent(inode, first_block,
					    stop_block - first_block);
		if (ret) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}

		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
			ret = ext4_ext_remove_space(inode, first_block,
						    stop_block - 1);
		else
			ret = ext4_ind_remove_space(handle, inode, first_block,
						    stop_block);

		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}

/*
 * Lazily attach a jbd2_inode to @inode so ordered-data writes can be
 * tracked by the journal.  Allocation happens outside i_lock; the lock
 * only guards the check-and-install, so a racing loser frees its copy.
 */
int ext4_inode_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	jinode = jbd2_alloc_inode(GFP_KERNEL);
	spin_lock(&inode->i_lock);
	if (!ei->jinode) {
		if (!jinode) {
			spin_unlock(&inode->i_lock);
			return -ENOMEM;
		}
		ei->jinode = jinode;
		jbd2_journal_init_jbd_inode(ei->jinode, inode);
		jinode = NULL;
	}
	spin_unlock(&inode->i_lock);
	if (unlikely(jinode != NULL))
		jbd2_free_inode(jinode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
int ext4_truncate(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int credits;
	int err = 0;
	handle_t *handle;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * There is a possibility that we're either freeing the inode
	 * or it's a completely new inode.  In those cases we might not
	 * have i_mutex locked because it's not necessary.
	 */
	if (!(inode->i_state & (I_NEW|I_FREEING)))
		WARN_ON(!inode_is_locked(inode));
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		return 0;

	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		err = ext4_inline_data_truncate(inode, &has_inline);
		if (err)
			return err;
		if (has_inline)
			return 0;
	}

	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
		if (ext4_inode_attach_jinode(inode) < 0)
			return 0;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* zero the tail of the last partial block, if any */
	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	/*
	 * We add the inode to the orphan list, so that if this
	 * truncate spans multiple transactions, and we crash, we will
	 * resume the truncate when the filesystem recovers.  It also
	 * marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);

	ext4_discard_preallocations(inode);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		err = ext4_ext_truncate(handle, inode);
	else
		ext4_ind_truncate(handle, inode);

	up_write(&ei->i_data_sem);
	if (err)
		goto out_stop;

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_evict_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	trace_ext4_truncate_exit(inode);
	return err;
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc	*gdp;
	struct buffer_head	*bh;
	struct super_block	*sb = inode->i_sb;
	ext4_fsblk_t		block;
	struct blk_plug		plug;
	int			inodes_per_block, inode_offset;

	iloc->bh = NULL;
	/* reject inode numbers outside the valid on-disk range */
	if (inode->i_ino < EXT4_ROOT_INO ||
	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return -EFSCORRUPTED;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		blk_start_plug(&plug);
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;
			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~((ext4_fsblk_t) ra_blks - 1);
			if (table > b)
				b = table;
			end = b + ra_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
		blk_finish_plug(&plug);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}

/*
 * Decide whether this inode may use DAX: requires the dax mount option,
 * a regular file, and none of journalled data, inline data, encryption,
 * or verity.
 */
static bool ext4_should_use_dax(struct inode *inode)
{
	if (!test_opt(inode->i_sb, DAX))
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
		return false;
	return true;
}

/*
 * Propagate on-disk ext4 inode flags into the generic VFS inode flags.
 */
void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & EXT4_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (ext4_should_use_dax(inode))
		new_fl |= S_DAX;
	if (flags & EXT4_ENCRYPT_FL)
		new_fl |= S_ENCRYPTED;
	if (flags & EXT4_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	if (flags & EXT4_VERITY_FL)
		new_fl |= S_VERITY;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
}

/*
 * Reconstruct the 48-bit block count from the raw on-disk inode; with the
 * huge_file feature and the HUGE_FILE inode flag, i_blocks counts fs
 * blocks rather than 512-byte sectors and must be scaled.
 */
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks ;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (ext4_has_feature_huge_file(sb)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks represent file system block size */
			return i_blocks  << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

static inline int ext4_iget_extra_inode(struct inode *inode,
					 struct ext4_inode *raw_inode,
					 struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;

	if
(EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= EXT4_INODE_SIZE(inode->i_sb) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); return ext4_find_inline_data_nolock(inode); } else EXT4_I(inode)->i_inline_off = 0; return 0; } int ext4_get_projid(struct inode *inode, kprojid_t *projid) { if (!ext4_has_feature_project(inode->i_sb)) return -EOPNOTSUPP; *projid = EXT4_I(inode)->i_projid; return 0; } /* * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag * set. */ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) { if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) inode_set_iversion_raw(inode, val); else inode_set_iversion_queried(inode, val); } static inline u64 ext4_inode_peek_iversion(const struct inode *inode) { if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) return inode_peek_iversion_raw(inode); else return inode_peek_iversion(inode); } struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ext4_iget_flags flags, const char *function, unsigned int line) { struct ext4_iloc iloc; struct ext4_inode *raw_inode; struct ext4_inode_info *ei; struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; loff_t size; int block; uid_t i_uid; gid_t i_gid; projid_t i_projid; if ((!(flags & EXT4_IGET_SPECIAL) && (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) || (ino < EXT4_ROOT_INO) || (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { if (flags & EXT4_IGET_HANDLE) return ERR_PTR(-ESTALE); __ext4_error(sb, function, line, "inode #%lu: comm %s: iget: illegal inode #", ino, current->comm); return ERR_PTR(-EFSCORRUPTED); } inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT4_I(inode); iloc.bh = NULL; ret = __ext4_get_inode_loc(inode, &iloc, 0); if (ret < 0) goto 
bad_inode; raw_inode = ext4_raw_inode(&iloc); if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { ext4_error_inode(inode, function, line, 0, "iget: root inode unallocated"); ret = -EFSCORRUPTED; goto bad_inode; } if ((flags & EXT4_IGET_HANDLE) && (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { ret = -ESTALE; goto bad_inode; } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > EXT4_INODE_SIZE(inode->i_sb) || (ei->i_extra_isize & 3)) { ext4_error_inode(inode, function, line, 0, "iget: bad extra_isize %u " "(inode size %u)", ei->i_extra_isize, EXT4_INODE_SIZE(inode->i_sb)); ret = -EFSCORRUPTED; goto bad_inode; } } else ei->i_extra_isize = 0; /* Precompute checksum seed for inode metadata */ if (ext4_has_metadata_csum(sb)) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); __le32 gen = raw_inode->i_generation; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum)); ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen)); } if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { ext4_error_inode(inode, function, line, 0, "iget: checksum invalid"); ret = -EFSBADCRC; goto bad_inode; } inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (ext4_has_feature_project(sb) && EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); else i_projid = EXT4_DEF_PROJID; if (!(test_opt(inode->i_sb, NO_UID32))) { i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); ei->i_projid = make_kprojid(&init_user_ns, i_projid); set_nlink(inode, 
le16_to_cpu(raw_inode->i_links_count)); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ei->i_inline_off = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes * the test is that same one that e2fsck uses * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { if ((inode->i_mode == 0 || !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && ino != EXT4_BOOT_LOADER_INO) { /* this inode is deleted */ ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have * valid i_mode and are being read by the orphan * recovery code: that's fine, we're about to complete * the process of deleting those. * OR it is the EXT4_BOOT_LOADER_INO which is * not initialized on a new filesystem. */ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); ext4_set_inode_flags(inode); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(sb, raw_inode); if ((size = i_size_read(inode)) < 0) { ext4_error_inode(inode, function, line, 0, "iget: bad i_size value: %lld", size); ret = -EFSCORRUPTED; goto bad_inode; } ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_block_group = iloc.block_group; ei->i_last_alloc_group = ~0; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! */ for (block = 0; block < EXT4_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); /* * Set transaction id's of transactions that have to be committed * to finish f[data]sync. 
We set them to currently running transaction * as we cannot be sure that the inode or some of its metadata isn't * part of the transaction - the inode could have been reclaimed and * now it is reread from disk. */ if (journal) { transaction_t *transaction; tid_t tid; read_lock(&journal->j_state_lock); if (journal->j_running_transaction) transaction = journal->j_running_transaction; else transaction = journal->j_committing_transaction; if (transaction) tid = transaction->t_tid; else tid = journal->j_commit_sequence; read_unlock(&journal->j_state_lock); ei->i_sync_tid = tid; ei->i_datasync_tid = tid; } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it. */ BUILD_BUG_ON(sizeof(struct ext4_inode) & 3); ei->i_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; } else { ret = ext4_iget_extra_inode(inode, raw_inode, ei); if (ret) goto bad_inode; } } EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { u64 ivers = le32_to_cpu(raw_inode->i_disk_version); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) ivers |= (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; } ext4_inode_set_iversion_queried(inode, ivers); } ret = 0; if (ei->i_file_acl && !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { ext4_error_inode(inode, function, line, 0, "iget: bad extended attribute block %llu", ei->i_file_acl); ret = -EFSCORRUPTED; goto bad_inode; } else if (!ext4_has_inline_data(inode)) { /* validate the block references in the inode */ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ret = 
ext4_ext_check_inode(inode); else ret = ext4_ind_check_inode(inode); } } if (ret) goto bad_inode; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { /* VFS does not allow setting these so must be corruption */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { ext4_error_inode(inode, function, line, 0, "iget: immutable or append flags " "not allowed on symlinks"); ret = -EFSCORRUPTED; goto bad_inode; } if (IS_ENCRYPTED(inode)) { inode->i_op = &ext4_encrypted_symlink_inode_operations; ext4_set_aops(inode); } else if (ext4_inode_is_fast_symlink(inode)) { inode->i_link = (char *)ei->i_data; inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } inode_nohighmem(inode); } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { inode->i_op = &ext4_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } else if (ino == EXT4_BOOT_LOADER_INO) { make_bad_inode(inode); } else { ret = -EFSCORRUPTED; ext4_error_inode(inode, function, line, 0, "iget: bogus i_mode (%o)", inode->i_mode); goto bad_inode; } if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) ext4_error_inode(inode, function, line, 0, "casefold flag without casefold feature"); brelse(iloc.bh); unlock_new_inode(inode); return inode; bad_inode: brelse(iloc.bh); iget_failed(inode); return ERR_PTR(ret); } static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode *raw_inode, 
struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); u64 i_blocks = inode->i_blocks; struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { /* * i_blocks can be represented in a 32 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = 0; ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); return 0; } if (!ext4_has_feature_huge_file(sb)) return -EFBIG; if (i_blocks <= 0xffffffffffffULL) { /* * i_blocks can be represented in a 48 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); } else { ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); /* i_block is stored in file system block size */ i_blocks = i_blocks >> (inode->i_blkbits - 9); raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); } return 0; } struct other_inode { unsigned long orig_ino; struct ext4_inode *raw_inode; }; static int other_inode_match(struct inode * inode, unsigned long ino, void *data) { struct other_inode *oi = (struct other_inode *) data; if ((inode->i_ino != ino) || (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) || ((inode->i_state & I_DIRTY_TIME) == 0)) return 0; spin_lock(&inode->i_lock); if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_INODE)) == 0) && (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); ext4_inode_csum_set(inode, oi->raw_inode, ei); spin_unlock(&ei->i_raw_lock); trace_ext4_other_inode_update_time(inode, oi->orig_ino); return -1; } spin_unlock(&inode->i_lock); 
return -1; } /* * Opportunistically update the other time fields for other inodes in * the same inode table block. */ static void ext4_update_other_inodes_time(struct super_block *sb, unsigned long orig_ino, char *buf) { struct other_inode oi; unsigned long ino; int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; int inode_size = EXT4_INODE_SIZE(sb); oi.orig_ino = orig_ino; /* * Calculate the first inode in the inode table block. Inode * numbers are one-based. That is, the first inode in a block * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). */ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; oi.raw_inode = (struct ext4_inode *) buf; (void) find_inode_nowait(sb, ino, other_inode_match, &oi); } } /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. This gobbles the caller's reference to the * buffer_head in the inode location struct. * * The caller must have write access to iloc->bh. */ static int ext4_do_update_inode(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; struct super_block *sb = inode->i_sb; int err = 0, rc, block; int need_datasync = 0, set_large_file = 0; uid_t i_uid; gid_t i_gid; projid_t i_projid; spin_lock(&ei->i_raw_lock); /* For fields not tracked in the in-memory inode, * initialise them to zero for new inodes. 
*/ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); i_gid = i_gid_read(inode); i_projid = from_kprojid(&init_user_ns, ei->i_projid); if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); /* * Fix up interoperability with old kernels. Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (ei->i_dtime && list_empty(&ei->i_orphan)) { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } else { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(i_gid)); } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); err = ext4_inode_blocks_set(handle, raw_inode, ei); if (err) { spin_unlock(&ei->i_raw_lock); goto out_brelse; } raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } if (ei->i_disksize > 0x7fffffffULL) { if (!ext4_has_feature_large_file(sb) || EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) set_large_file = 1; } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || 
S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else if (!ext4_has_inline_data(inode)) { for (block = 0; block < EXT4_N_BLOCKS; block++) raw_inode->i_block[block] = ei->i_data[block]; } if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { u64 ivers = ext4_inode_peek_iversion(inode); raw_inode->i_disk_version = cpu_to_le32(ivers); if (ei->i_extra_isize) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) raw_inode->i_version_hi = cpu_to_le32(ivers >> 32); raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } } BUG_ON(!ext4_has_feature_project(inode->i_sb) && i_projid != EXT4_DEF_PROJID); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) raw_inode->i_projid = cpu_to_le32(i_projid); ext4_inode_csum_set(inode, raw_inode, ei); spin_unlock(&ei->i_raw_lock); if (inode->i_sb->s_flags & SB_LAZYTIME) ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, bh->b_data); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, NULL, bh); if (!err) err = rc; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_brelse; ext4_set_feature_large_file(sb); ext4_handle_sync(handle); err = ext4_handle_dirty_super(handle, sb); } ext4_update_inode_fsync_trans(handle, inode, need_datasync); out_brelse: brelse(bh); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_write_inode() * * We are called from a few places: * * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. * Here, there will be no transaction running. We wait for any running * transaction to commit. 
* * - Within flush work (sys_sync(), kupdate and such). * We wait on commit, if told to. * * - Within iput_final() -> write_inode_now() * We wait on commit, if told to. * * In all cases it is actually safe for us to return without doing anything, * because the inode has been copied into a raw inode buffer in * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL * writeback. * * Note that we are absolutely dependent upon all inode dirtiers doing the * right thing: they *must* call mark_inode_dirty() after dirtying info in * which we are interested. * * It would be a bug for them to not do this. The code: * * mark_inode_dirty(inode) * stuff(); * inode->i_size = expr; * * is in error because write_inode() could occur while `stuff()' is running, * and the new i_size will be lost. Plus the inode will no longer be on the * superblock's dirty inode list. */ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) { int err; if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) || sb_rdonly(inode->i_sb)) return 0; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; if (EXT4_SB(inode->i_sb)->s_journal) { if (ext4_journal_current_handle()) { jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); dump_stack(); return -EIO; } /* * No need to force transaction in WB_SYNC_NONE mode. Also * ext4_sync_fs() will force the commit after everything is * written. */ if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync) return 0; err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, EXT4_I(inode)->i_sync_tid); } else { struct ext4_iloc iloc; err = __ext4_get_inode_loc(inode, &iloc, 0); if (err) return err; /* * sync(2) will flush the whole buffer cache. No need to do * it here separately for each inode. 
*/ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) sync_dirty_buffer(iloc.bh); if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, "IO error syncing inode"); err = -EIO; } brelse(iloc.bh); } return err; } /* * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate * buffers that are attached to a page stradding i_size and are undergoing * commit. In that case we have to wait for commit to finish and try again. */ static void ext4_wait_for_tail_page_commit(struct inode *inode) { struct page *page; unsigned offset; journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = 0; int ret; offset = inode->i_size & (PAGE_SIZE - 1); /* * All buffers in the last page remain valid? Then there's nothing to * do. We do the check mainly to optimize the common PAGE_SIZE == * blocksize case */ if (offset > PAGE_SIZE - i_blocksize(inode)) return; while (1) { page = find_lock_page(inode->i_mapping, inode->i_size >> PAGE_SHIFT); if (!page) return; ret = __ext4_journalled_invalidatepage(page, offset, PAGE_SIZE - offset); unlock_page(page); put_page(page); if (ret != -EBUSY) return; commit_tid = 0; read_lock(&journal->j_state_lock); if (journal->j_committing_transaction) commit_tid = journal->j_committing_transaction->t_tid; read_unlock(&journal->j_state_lock); if (commit_tid) jbd2_log_wait_commit(journal, commit_tid); } } /* * ext4_setattr() * * Called from notify_change. * * We want to trap VFS attempts to truncate the file as soon as * possible. In particular, we want to make sure that when the VFS * shrinks i_size, we put the inode on the orphan list and modify * i_disksize immediately, so that during the subsequent flushing of * dirty pages and freeing of disk blocks, we can guarantee that any * commit will leave the blocks being flushed in an unused state on * disk. 
(On recovery, the inode will get truncated and the blocks will * be freed, so we have a strong guarantee that no future commit will * leave these blocks visible to the user.) * * Another thing we have to assure is that if we are in ordered mode * and inode is still attached to the committing transaction, we must * we start writeout of all the dirty pages which are being truncated. * This way we are sure that all the data written in the previous * transaction are already on disk (truncate waits for pages under * writeback). * * Called with inode->i_mutex down. */ int ext4_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error, rc = 0; int orphan = 0; const unsigned int ia_valid = attr->ia_valid; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; if (unlikely(IS_APPEND(inode) && (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)))) return -EPERM; error = setattr_prepare(dentry, attr); if (error) return error; error = fscrypt_prepare_setattr(dentry, attr); if (error) return error; error = fsverity_prepare_setattr(dentry, attr); if (error) return error; if (is_quota_modification(inode, attr)) { error = dquot_initialize(inode); if (error) return error; } if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } /* dquot_transfer() calls back ext4_get_inode_usage() which * counts xattr inode references. 
*/ down_read(&EXT4_I(inode)->xattr_sem); error = dquot_transfer(inode, attr); up_read(&EXT4_I(inode)->xattr_sem); if (error) { ext4_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; int shrink = (attr->ia_size < inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; } if (!S_ISREG(inode->i_mode)) return -EINVAL; if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) inode_inc_iversion(inode); if (shrink) { if (ext4_should_order_data(inode)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) goto err_out; } /* * Blocks are going to be removed from the inode. Wait * for dio in flight. */ inode_dio_wait(inode); } down_write(&EXT4_I(inode)->i_mmap_sem); rc = ext4_break_layouts(inode); if (rc) { up_write(&EXT4_I(inode)->i_mmap_sem); return rc; } if (attr->ia_size != inode->i_size) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto out_mmap_sem; } if (ext4_handle_valid(handle) && shrink) { error = ext4_orphan_add(handle, inode); orphan = 1; } /* * Update c/mtime on truncate up, ext4_truncate() will * update c/mtime in shrink case below */ if (!shrink) { inode->i_mtime = current_time(inode); inode->i_ctime = inode->i_mtime; } down_write(&EXT4_I(inode)->i_data_sem); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) error = rc; /* * We have to update i_size under i_data_sem together * with i_disksize to avoid races with writeback code * running ext4_wb_update_i_disksize(). 
*/ if (!error) i_size_write(inode, attr->ia_size); up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) goto out_mmap_sem; if (!shrink) { pagecache_isize_extended(inode, oldsize, inode->i_size); } else if (ext4_should_journal_data(inode)) { ext4_wait_for_tail_page_commit(inode); } } /* * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. */ truncate_pagecache(inode, inode->i_size); /* * Call ext4_truncate() even if i_size didn't change to * truncate possible preallocated blocks. */ if (attr->ia_size <= oldsize) { rc = ext4_truncate(inode); if (rc) error = rc; } out_mmap_sem: up_write(&EXT4_I(inode)->i_mmap_sem); } if (!error) { setattr_copy(inode, attr); mark_inode_dirty(inode); } /* * If the call to ext4_truncate failed to get a transaction handle at * all, we need to clean up the in-core orphan list manually. */ if (orphan && inode->i_nlink) ext4_orphan_del(NULL, inode); if (!error && (ia_valid & ATTR_MODE)) rc = posix_acl_chmod(inode, inode->i_mode); err_out: ext4_std_error(inode->i_sb, error); if (!error) error = rc; return error; } int ext4_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct ext4_inode *raw_inode; struct ext4_inode_info *ei = EXT4_I(inode); unsigned int flags; if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { stat->result_mask |= STATX_BTIME; stat->btime.tv_sec = ei->i_crtime.tv_sec; stat->btime.tv_nsec = ei->i_crtime.tv_nsec; } flags = ei->i_flags & EXT4_FL_USER_VISIBLE; if (flags & EXT4_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; if (flags & EXT4_COMPR_FL) stat->attributes |= STATX_ATTR_COMPRESSED; if (flags & EXT4_ENCRYPT_FL) stat->attributes |= STATX_ATTR_ENCRYPTED; if (flags & EXT4_IMMUTABLE_FL) stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & EXT4_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; stat->attributes_mask |= (STATX_ATTR_APPEND | 
STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP); generic_fillattr(inode, stat); return 0; } int ext4_file_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); u64 delalloc_blocks; ext4_getattr(path, stat, request_mask, query_flags); /* * If there is inline data in the inode, the inode will normally not * have data blocks allocated (it may have an external xattr block). * Report at least one sector for such files, so tools like tar, rsync, * others don't incorrectly think the file is completely sparse. */ if (unlikely(ext4_has_inline_data(inode))) stat->blocks += (stat->size + 511) >> 9; /* * We can't update i_blocks if the block allocation is delayed * otherwise in the case of system crash before the real block * allocation is done, we will have i_blocks inconsistent with * on-disk file blocks. * We always keep i_blocks updated together with real * allocation. But to not confuse with user, stat * will return the blocks that include the delayed allocation * blocks for this file. */ delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), EXT4_I(inode)->i_reserved_data_blocks); stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); return 0; } static int ext4_index_trans_blocks(struct inode *inode, int lblocks, int pextents) { if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return ext4_ind_trans_blocks(inode, lblocks); return ext4_ext_index_trans_blocks(inode, pextents); } /* * Account for index blocks, block groups bitmaps and block group * descriptor blocks if modify datablocks and index blocks * worse case, the indexs blocks spread over different block groups * * If datablocks are discontiguous, they are possible to spread over * different block groups too. If they are contiguous, with flexbg, * they could still across block group boundary. 
* * Also account for superblock, inode, quota and xattr blocks */ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents) { ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; int idxblocks; int ret = 0; /* * How many index blocks need to touch to map @lblocks logical blocks * to @pextents physical extents? */ idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); ret = idxblocks; /* * Now let's see how many group bitmaps and group descriptors need * to account */ groups = idxblocks + pextents; gdpblocks = groups; if (groups > ngroups) groups = ngroups; if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; /* bitmaps and block group descriptor blocks */ ret += groups + gdpblocks; /* Blocks for super block, inode, quota and xattr blocks */ ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } /* * Calculate the total number of credits to reserve to fit * the modification of a single pages into a single transaction, * which may include multiple chunks of block allocations. * * This could be called via ext4_write_begin() * * We need to consider the worse case, when * one new block per extent. */ int ext4_writepage_trans_blocks(struct inode *inode) { int bpp = ext4_journal_blocks_per_page(inode); int ret; ret = ext4_meta_trans_blocks(inode, bpp, bpp); /* Account for data blocks for journalled mode */ if (ext4_should_journal_data(inode)) ret += bpp; return ret; } /* * Calculate the journal credits for a chunk of data modification. * * This is called from DIO, fallocate or whoever calling * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. * * journal buffers for data blocks are not included here, as DIO * and fallocate do no need to journal data buffers. */ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) { return ext4_meta_trans_blocks(inode, nrblocks, 1); } /* * The caller must have previously called ext4_reserve_inode_write(). 
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err = 0;

	/* On forced shutdown we still must drop the caller's bh reference. */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, We end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			     struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			/* Failed: release the reference and poison the bh. */
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * Worker for expanding i_extra_isize to @new_extra_isize bytes.  The
 * caller holds the xattr write lock and journal write access on the
 * inode buffer; *no_expand is set on unrecoverable expansion failure.
 */
static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int error;

	/* this was checked at iget time, but double check for good measure */
	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
	    (ei->i_extra_isize & 3)) {
		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
				 ei->i_extra_isize,
				 EXT4_INODE_SIZE(inode->i_sb));
		return -EFSCORRUPTED;
	}
	if ((new_extra_isize < ei->i_extra_isize) ||
	    (new_extra_isize < 4) ||
	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
		return -EINVAL;	/* Should never happen */

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if
	    (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	     header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		/*
		 * No in-inode xattrs: just zero the newly claimed area and
		 * bump i_extra_isize — nothing needs to be moved.
		 */
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/*
		 * Inode size expansion failed; don't try again
		 */
		*no_expand = 1;
	}

	return error;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	/* A previous failure marked this inode as not expandable. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode.  When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle.  If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
		return -ENOSPC;

	/* Best effort only: back off if the xattr lock is contended. */
	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);
	return error;
}

/*
 * Expand the inode's extra_isize with its own transaction.  Consumes the
 * caller's reference on iloc->bh on every path (either released here on
 * error, or dropped by ext4_mark_iloc_dirty() on the normal path).
 */
int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, iloc->bh);
	if (error) {
		brelse(iloc->bh);
		goto out_stop;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);
	/* mark_iloc_dirty drops the iloc->bh reference even on error. */
	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

	ext4_write_unlock_xattr(inode, &no_expand);
out_stop:
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	/*
	 * Opportunistically grow i_extra_isize toward the wanted size;
	 * failure here is deliberately ignored — it only costs features.
	 */
	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp files.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	/* Pure timestamp dirtying needs no transaction here. */
	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

/*
 * Switch the inode's data-journaling mode on (@val != 0) or off.
 */
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.
	 * If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has. There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			return err;
		}
	}

	percpu_down_write(&sbi->s_journal_flag_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		/* Turning journaling off: drain the journal first. */
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_journal_flag_rwsem);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	/* Install the address_space_operations matching the new mode. */
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_journal_flag_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);

	/* Finally we can mark the inode as dirty.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	/* A journaling-mode switch must hit disk synchronously. */
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/* Walk callback: true while a buffer is still unmapped. */
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

/*
 * ext4_page_mkwrite - make a mmap'ed page writable.
 *
 * Runs under i_mmap_sem (read) and sb pagefault protection; allocates
 * blocks for the faulting page if needed and, in data=journal mode,
 * journals the page's buffers.
 */
vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/* Last partial page only needs its in-file portion considered. */
	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to do
	 * journal_start/journal_stop which can block and take a long time
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole...
	 */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	err = block_page_mkwrite(vma, vmf, get_block);
	if (!err && ext4_should_journal_data(inode)) {
		/* data=journal: every buffer on the page must be journalled. */
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(err);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * ext4_filemap_fault - read fault handler; serializes against
 * truncate/punch-hole via i_mmap_sem and defers to filemap_fault().
 */
vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return ret;
}
./CrossVul/dataset_final_sorted/CWE-416/c/good_1314_0